| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81-54k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
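# Maps fairseq parameter-name fragments to their Hugging Face UniSpeechSat
# counterparts; the "*" wildcard is replaced with the encoder layer index below.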
a_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
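# Weights that live at the top level of the HF model and therefore must not be
# prefixed with "unispeech_sat." during the mapping step.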
a_ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def _a( UpperCamelCase__ : Tuple, UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Any, UpperCamelCase__ : Dict, UpperCamelCase__ : List[str] ):
'''simple docstring'''
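# Walk the dotted key (e.g. "encoder.layers.0.attention.k_proj") down to the target submodule.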
for attribute in key.split('''.''' ):
SCREAMING_SNAKE_CASE__ : int =getattr(UpperCamelCase__, UpperCamelCase__ )
if weight_type is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] =getattr(UpperCamelCase__, UpperCamelCase__ ).shape
else:
SCREAMING_SNAKE_CASE__ : Any =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
SCREAMING_SNAKE_CASE__ : Union[str, Any] =value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE__ : Union[str, Any] =value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE__ : str =value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE__ : Optional[int] =value
else:
SCREAMING_SNAKE_CASE__ : Tuple =value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =[]
SCREAMING_SNAKE_CASE__ : Dict =fairseq_model.state_dict()
SCREAMING_SNAKE_CASE__ : List[str] =hf_model.unispeech_sat.feature_extractor
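# Convolutional feature-extractor weights are loaded separately; everything else
# goes through the MAPPING table above.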
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE__ : Any =False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, hf_model.config.feat_extract_norm == '''group''', )
SCREAMING_SNAKE_CASE__ : Optional[int] =True
else:
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE__ : Optional[int] ='''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
SCREAMING_SNAKE_CASE__ : str =True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE__ : Any =name.split(UpperCamelCase__ )[0].split('''.''' )[-2]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =mapped_key.replace('''*''', UpperCamelCase__ )
if "weight_g" in name:
SCREAMING_SNAKE_CASE__ : List[str] ='''weight_g'''
elif "weight_v" in name:
SCREAMING_SNAKE_CASE__ : Any ='''weight_v'''
elif "bias" in name:
SCREAMING_SNAKE_CASE__ : Any ='''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE__ : Tuple ='''weight'''
else:
SCREAMING_SNAKE_CASE__ : Dict =None
set_recursively(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
continue
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(f"Unused weights: {unused_weights}" )
def _a( UpperCamelCase__ : List[Any], UpperCamelCase__ : Tuple, UpperCamelCase__ : List[str], UpperCamelCase__ : int, UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
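# Conv weight names look like "conv_layers.<layer_id>.<type_id>....": type_id 0 is
# the convolution itself, type_id 2 a layer norm.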
SCREAMING_SNAKE_CASE__ : Dict =full_name.split('''conv_layers.''' )[-1]
SCREAMING_SNAKE_CASE__ : List[str] =name.split('''.''' )
SCREAMING_SNAKE_CASE__ : List[str] =int(items[0] )
SCREAMING_SNAKE_CASE__ : List[str] =int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
SCREAMING_SNAKE_CASE__ : str =value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
SCREAMING_SNAKE_CASE__ : str =value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found." )
SCREAMING_SNAKE_CASE__ : Dict =value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." )
SCREAMING_SNAKE_CASE__ : str =value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(UpperCamelCase__ )
@torch.no_grad()
def _a( UpperCamelCase__ : str, UpperCamelCase__ : Dict, UpperCamelCase__ : Optional[Any]=None, UpperCamelCase__ : Dict=None, UpperCamelCase__ : Any=True ):
'''simple docstring'''
if config_path is not None:
SCREAMING_SNAKE_CASE__ : str =UniSpeechSatConfig.from_pretrained(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE__ : int =UniSpeechSatConfig()
SCREAMING_SNAKE_CASE__ : Dict =''''''
if is_finetuned:
SCREAMING_SNAKE_CASE__ : Optional[Any] =UniSpeechSatForCTC(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE__ : List[str] =UniSpeechSatForPreTraining(UpperCamelCase__ )
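# fairseq resolves the task dictionary relative to the "data" directory, so point
# it at the folder containing dict_path.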
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model[0].eval()
recursively_load_weights(UpperCamelCase__, UpperCamelCase__ )
hf_wavavec.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
a_ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
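# Import structure consumed by _LazyModule below: heavyweight backends (tokenizers,
# torch, tf) are only imported when actually available and requested.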
a_ = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a_ = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _a( UpperCamelCase__ : NDArray[floataa], UpperCamelCase__ : NDArray[floataa], UpperCamelCase__ : list[int], UpperCamelCase__ : int, ):
'''simple docstring'''
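# Jacobi iterative method: each pass re-solves every row for its diagonal unknown
# using the values from the previous iteration.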
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =coefficient_matrix.shape
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =constant_matrix.shape
if rowsa != colsa:
SCREAMING_SNAKE_CASE__ : Any =f"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"
raise ValueError(UpperCamelCase__ )
if colsa != 1:
SCREAMING_SNAKE_CASE__ : str =f"Constant matrix must be nx1 but received {rowsa}x{colsa}"
raise ValueError(UpperCamelCase__ )
if rowsa != rowsa:
SCREAMING_SNAKE_CASE__ : str =(
'''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
f"received {rowsa}x{colsa} and {rowsa}x{colsa}"
)
raise ValueError(UpperCamelCase__ )
if len(UpperCamelCase__ ) != rowsa:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(
'''Number of initial values must be equal to number of rows in coefficient '''
f"matrix but received {len(UpperCamelCase__ )} and {rowsa}"
)
raise ValueError(UpperCamelCase__ )
if iterations <= 0:
raise ValueError('''Iterations must be at least 1''' )
SCREAMING_SNAKE_CASE__ : NDArray[floataa] =np.concatenate(
(coefficient_matrix, constant_matrix), axis=1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =table.shape
strictly_diagonally_dominant(UpperCamelCase__ )
# Iterates the whole matrix for given number of times
for _ in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : List[str] =[]
for row in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =0
for col in range(UpperCamelCase__ ):
if col == row:
SCREAMING_SNAKE_CASE__ : int =table[row][col]
elif col == cols - 1:
SCREAMING_SNAKE_CASE__ : Any =table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
SCREAMING_SNAKE_CASE__ : int =(temp + val) / denom
new_val.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =new_val
return [float(UpperCamelCase__ ) for i in new_val]
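# Example (hypothetical values): for the strictly diagonally dominant system
# A = [[4, 1, 1], [1, 5, 2], [1, 2, 4]], b = [[2], [-6], [-4]], a few iterations
# of the routine above starting from [0.5, -0.5, -0.5] converge toward the exact
# solution of Ax = b.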
def _a( UpperCamelCase__ : NDArray[floataa] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =table.shape
SCREAMING_SNAKE_CASE__ : Any =True
for i in range(0, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : int =0
for j in range(0, cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
'''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
a_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
def __init__( self : Dict , **__lowercase : List[str] ) -> List[str]:
requires_backends(self , ['''bs4'''] )
super().__init__(**__lowercase )
def __magic_name__ ( self : Optional[int] , __lowercase : Optional[Any] ) -> List[Any]:
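# Walk up the parse tree collecting tag names and sibling indices (0 when the tag
# is unique among its same-named siblings, otherwise its 1-based position).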
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[]
SCREAMING_SNAKE_CASE__ : Tuple =[]
SCREAMING_SNAKE_CASE__ : Dict =element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
SCREAMING_SNAKE_CASE__ : Dict =parent.find_all(child.name , recursive=__lowercase )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(__lowercase ) else next(i for i, s in enumerate(__lowercase , 1 ) if s is child ) )
SCREAMING_SNAKE_CASE__ : str =parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def __magic_name__ ( self : Union[str, Any] , __lowercase : Union[str, Any] ) -> Optional[int]:
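# Extract every visible text node from the HTML together with its xpath tag and
# subscript sequences.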
SCREAMING_SNAKE_CASE__ : Optional[int] =BeautifulSoup(__lowercase , '''html.parser''' )
SCREAMING_SNAKE_CASE__ : List[Any] =[]
SCREAMING_SNAKE_CASE__ : List[str] =[]
SCREAMING_SNAKE_CASE__ : Any =[]
for element in html_code.descendants:
if type(__lowercase ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
SCREAMING_SNAKE_CASE__ : Tuple =html.unescape(__lowercase ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(__lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =self.xpath_soup(__lowercase )
stringaxtag_seq.append(__lowercase )
stringaxsubs_seq.append(__lowercase )
if len(__lowercase ) != len(__lowercase ):
raise ValueError('''Number of doc strings and xtags does not correspond''' )
if len(__lowercase ) != len(__lowercase ):
raise ValueError('''Number of doc strings and xsubs does not correspond''' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def __magic_name__ ( self : Optional[Any] , __lowercase : Tuple , __lowercase : Any ) -> List[str]:
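# e.g. tags ["html", "body", "div"] with subs [0, 0, 3] yield "/html/body/div[3]".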
SCREAMING_SNAKE_CASE__ : Tuple =''''''
for tagname, subs in zip(__lowercase , __lowercase ):
xpath += F"/{tagname}"
if subs != 0:
xpath += F"[{subs}]"
return xpath
def __call__( self : Any , __lowercase : int ) -> BatchFeature:
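# Accepts a single HTML string or a batch of them and returns the extracted nodes
# together with their xpaths as a BatchFeature.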
SCREAMING_SNAKE_CASE__ : Tuple =False
# Check that strings has a valid type
if isinstance(__lowercase , __lowercase ):
SCREAMING_SNAKE_CASE__ : List[Any] =True
elif isinstance(__lowercase , (list, tuple) ):
if len(__lowercase ) == 0 or isinstance(html_strings[0] , __lowercase ):
SCREAMING_SNAKE_CASE__ : str =True
if not valid_strings:
raise ValueError(
'''HTML strings must be of type `str`, `List[str]` (batch of examples), '''
F"but is of type {type(__lowercase )}." )
SCREAMING_SNAKE_CASE__ : List[Any] =bool(isinstance(__lowercase , (list, tuple) ) and (isinstance(html_strings[0] , __lowercase )) )
if not is_batched:
SCREAMING_SNAKE_CASE__ : List[Any] =[html_strings]
# Get nodes + xpaths
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[]
SCREAMING_SNAKE_CASE__ : Dict =[]
for html_string in html_strings:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.get_three_from_single(__lowercase )
nodes.append(__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[]
for node, tag_list, sub_list in zip(__lowercase , __lowercase , __lowercase ):
SCREAMING_SNAKE_CASE__ : str =self.construct_xpath(__lowercase , __lowercase )
xpath_strings.append(__lowercase )
xpaths.append(__lowercase )
# return as Dict
SCREAMING_SNAKE_CASE__ : Optional[Any] ={'''nodes''': nodes, '''xpaths''': xpaths}
SCREAMING_SNAKE_CASE__ : Optional[int] =BatchFeature(data=__lowercase , tensor_type=__lowercase )
return encoded_inputs
| 665 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _a( UpperCamelCase__ : str, UpperCamelCase__ : Tuple, UpperCamelCase__ : Any, UpperCamelCase__ : List[str], UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
with open(UpperCamelCase__ ) as metadata_file:
SCREAMING_SNAKE_CASE__ : Optional[int] =json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =LukeConfig(use_entity_aware_attention=UpperCamelCase__, **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.load(UpperCamelCase__, map_location='''cpu''' )['''module''']
# Load the entity vocab file
SCREAMING_SNAKE_CASE__ : List[str] =load_original_entity_vocab(UpperCamelCase__ )
# add an entry for [MASK2]
SCREAMING_SNAKE_CASE__ : Optional[int] =max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
SCREAMING_SNAKE_CASE__ : Optional[int] =XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE__ : List[Any] =AddedToken('''<ent>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any =AddedToken('''<ent2>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''r''' ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] =json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : int ='''MLukeTokenizer'''
with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''w''' ) as f:
json.dump(UpperCamelCase__, UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__, MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ), '''w''' ) as f:
json.dump(UpperCamelCase__, UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =MLukeTokenizer.from_pretrained(UpperCamelCase__ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE__ : str =tokenizer.convert_tokens_to_ids(['''@'''] )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.convert_tokens_to_ids(['''#'''] )[0]
SCREAMING_SNAKE_CASE__ : Dict =state_dict['''embeddings.word_embeddings.weight''']
SCREAMING_SNAKE_CASE__ : List[str] =word_emb[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Tuple =word_emb[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : str =torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[bias_name]
SCREAMING_SNAKE_CASE__ : List[Any] =decoder_bias[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : str =decoder_bias[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : List[str] =torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE__ : Tuple =f"encoder.layer.{layer_index}.attention.self."
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : List[Any] =state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE__ : Any =state_dict['''entity_embeddings.entity_embeddings.weight''']
SCREAMING_SNAKE_CASE__ : Any =entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict['''entity_predictions.bias''']
SCREAMING_SNAKE_CASE__ : Tuple =entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_prediction_bias, entity_mask_bias] )
SCREAMING_SNAKE_CASE__ : int =LukeForMaskedLM(config=UpperCamelCase__ ).eval()
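# The decoder weights are tied to the embeddings: drop them from the checkpoint
# and re-create them with tie_weights() after loading.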
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
SCREAMING_SNAKE_CASE__ : Tuple =OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[key]
else:
SCREAMING_SNAKE_CASE__ : Any =state_dict[key]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =model.load_state_dict(UpperCamelCase__, strict=UpperCamelCase__ )
if set(UpperCamelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" )
if set(UpperCamelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
SCREAMING_SNAKE_CASE__ : Any =MLukeTokenizer.from_pretrained(UpperCamelCase__, task='''entity_classification''' )
SCREAMING_SNAKE_CASE__ : str ='''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(0, 9)
SCREAMING_SNAKE_CASE__ : str =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : List[Any] =model(**UpperCamelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE__ : str =torch.Size((1, 3_3, 7_6_8) )
SCREAMING_SNAKE_CASE__ : int =torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE__ : Any =torch.Size((1, 1, 7_6_8) )
SCREAMING_SNAKE_CASE__ : int =torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
SCREAMING_SNAKE_CASE__ : str =MLukeTokenizer.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''Tokyo is the capital of <mask>.'''
SCREAMING_SNAKE_CASE__ : Dict =(2_4, 3_0)
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : List[str] =model(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] =encoding['''input_ids'''][0].tolist()
SCREAMING_SNAKE_CASE__ : Any =input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.entity_logits[0][0].argmax().item()
SCREAMING_SNAKE_CASE__ : Dict =[
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) )
model.save_pretrained(UpperCamelCase__ )
def _a( UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] =['''[MASK]''', '''[PAD]''', '''[UNK]''']
SCREAMING_SNAKE_CASE__ : List[str] =[json.loads(UpperCamelCase__ ) for line in open(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Optional[int] ={}
for entry in data:
SCREAMING_SNAKE_CASE__ : Tuple =entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
SCREAMING_SNAKE_CASE__ : str =entity_id
break
SCREAMING_SNAKE_CASE__ : Union[str, Any] =f"{language}:{entity_name}"
SCREAMING_SNAKE_CASE__ : Union[str, Any] =entity_id
return new_mapping
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 665 | 1 |
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def _a( UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : List[str]=[] ):
'''simple docstring'''
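# Build an alpha mask that ramps linearly from 0 at the tile border to 255 inside,
# so overlapping tiles blend smoothly; borders listed in remove_borders are cropped.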
SCREAMING_SNAKE_CASE__ : Tuple =size[0] - overlap_pixels * 2
SCREAMING_SNAKE_CASE__ : Any =size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
SCREAMING_SNAKE_CASE__ : Union[str, Any] =np.ones((size_y, size_x), dtype=np.uinta ) * 2_5_5
SCREAMING_SNAKE_CASE__ : List[Any] =np.pad(UpperCamelCase__, mode='''linear_ramp''', pad_width=UpperCamelCase__, end_values=0 )
if "l" in remove_borders:
SCREAMING_SNAKE_CASE__ : Optional[int] =mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
SCREAMING_SNAKE_CASE__ : str =mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
SCREAMING_SNAKE_CASE__ : List[str] =mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def _a( UpperCamelCase__ : Dict, UpperCamelCase__ : Dict, UpperCamelCase__ : Dict ):
'''simple docstring'''
return max(UpperCamelCase__, min(UpperCamelCase__, UpperCamelCase__ ) )
def _a( UpperCamelCase__ : [int], UpperCamelCase__ : [int], UpperCamelCase__ : [int] ):
'''simple docstring'''
return (
clamp(rect[0], min[0], max[0] ),
clamp(rect[1], min[1], max[1] ),
clamp(rect[2], min[0], max[0] ),
clamp(rect[3], min[1], max[1] ),
)
def _a( UpperCamelCase__ : [int], UpperCamelCase__ : int, UpperCamelCase__ : [int] ):
'''simple docstring'''
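# Grow the tile rectangle by the overlap on every side, clamped to the image bounds.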
SCREAMING_SNAKE_CASE__ : Dict =list(UpperCamelCase__ )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
SCREAMING_SNAKE_CASE__ : Tuple =clamp_rect(UpperCamelCase__, [0, 0], [image_size[0], image_size[1]] )
return rect
def _a( UpperCamelCase__ : Any, UpperCamelCase__ : Optional[Any], UpperCamelCase__ : str, UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =Image.new('''RGB''', (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ), (0, 0), )
result.paste(UpperCamelCase__, (original_slice, 0) )
return result
def _a( UpperCamelCase__ : str, UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(original_image_slice * 4, 0, tile.size[0], tile.size[1])
SCREAMING_SNAKE_CASE__ : Union[str, Any] =tile.crop(UpperCamelCase__ )
return tile
def _a( UpperCamelCase__ : str, UpperCamelCase__ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =n % d
return n - divisor
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
def __init__( self : Any , __lowercase : AutoencoderKL , __lowercase : CLIPTextModel , __lowercase : CLIPTokenizer , __lowercase : UNetaDConditionModel , __lowercase : DDPMScheduler , __lowercase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __lowercase : int = 3_50 , ) -> Optional[int]:
super().__init__(
vae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , unet=__lowercase , low_res_scheduler=__lowercase , scheduler=__lowercase , max_noise_level=__lowercase , )
def __magic_name__ ( self : Optional[Any] , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : str , __lowercase : Tuple , __lowercase : Optional[int] , __lowercase : Any , **__lowercase : List[Any] ) -> Dict:
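# Upscale a single tile: crop it (with overlap), run the base upscale pipeline on
# it, then paste the 4x result into final_image under a feathered transparency mask.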
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =(
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
SCREAMING_SNAKE_CASE__ : Any =add_overlap_rect(__lowercase , __lowercase , image.size )
SCREAMING_SNAKE_CASE__ : Any =image.crop(__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
SCREAMING_SNAKE_CASE__ : int =translated_slice_x - (original_image_slice / 2)
SCREAMING_SNAKE_CASE__ : Optional[Any] =max(0 , __lowercase )
SCREAMING_SNAKE_CASE__ : Any =squeeze_tile(__lowercase , __lowercase , __lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : int =to_input.size
SCREAMING_SNAKE_CASE__ : Tuple =to_input.resize((tile_size, tile_size) , Image.BICUBIC )
SCREAMING_SNAKE_CASE__ : Dict =super(__lowercase , self ).__call__(image=__lowercase , **__lowercase ).images[0]
SCREAMING_SNAKE_CASE__ : List[Any] =upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
SCREAMING_SNAKE_CASE__ : Dict =unsqueeze_tile(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Dict =upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
SCREAMING_SNAKE_CASE__ : str =[]
if x == 0:
remove_borders.append('''l''' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('''r''' )
if y == 0:
remove_borders.append('''t''' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('''b''' )
SCREAMING_SNAKE_CASE__ : Optional[int] =Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__lowercase ) , mode='''L''' , )
final_image.paste(
__lowercase , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __lowercase )
@torch.no_grad()
def __call__( self : Any , __lowercase : Union[str, List[str]] , __lowercase : Union[PIL.Image.Image, List[PIL.Image.Image]] , __lowercase : int = 75 , __lowercase : float = 9.0 , __lowercase : int = 50 , __lowercase : Optional[Union[str, List[str]]] = None , __lowercase : Optional[int] = 1 , __lowercase : float = 0.0 , __lowercase : Optional[torch.Generator] = None , __lowercase : Optional[torch.FloatTensor] = None , __lowercase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowercase : int = 1 , __lowercase : int = 1_28 , __lowercase : int = 32 , __lowercase : int = 32 , ) -> int:
SCREAMING_SNAKE_CASE__ : Any =Image.new('''RGB''' , (image.size[0] * 4, image.size[1] * 4) )
SCREAMING_SNAKE_CASE__ : str =math.ceil(image.size[0] / tile_size )
SCREAMING_SNAKE_CASE__ : int =math.ceil(image.size[1] / tile_size )
SCREAMING_SNAKE_CASE__ : List[str] =tcx * tcy
SCREAMING_SNAKE_CASE__ : List[str] =0
for y in range(__lowercase ):
for x in range(__lowercase ):
self._process_tile(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , prompt=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , noise_level=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , )
current_count += 1
if callback is not None:
callback({'''progress''': current_count / total_tile_count, '''image''': final_image} )
return final_image
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict ='''stabilityai/stable-diffusion-x4-upscaler'''
SCREAMING_SNAKE_CASE__ : Any =StableDiffusionTiledUpscalePipeline.from_pretrained(UpperCamelCase__, revision='''fp16''', torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : List[str] =pipe.to('''cuda''' )
SCREAMING_SNAKE_CASE__ : List[str] =Image.open('''../../docs/source/imgs/diffusers_library.jpg''' )
def callback(UpperCamelCase__ : int ):
print(f"progress: {obj['progress']:.4f}" )
obj["image"].save('''diffusers_library_progress.jpg''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] =pipe(image=UpperCamelCase__, prompt='''Black font, white background, vector''', noise_level=4_0, callback=UpperCamelCase__ )
final_image.save('''diffusers_library.jpg''' )
if __name__ == "__main__":
main()
| 665 |
'''simple docstring'''
def _a( UpperCamelCase__ : str, UpperCamelCase__ : str ):
'''simple docstring'''
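# dp[i][j] is True when the first i characters of a can be turned into the first j
# characters of b by upper-casing some lower-case letters and deleting the rest.
# Example: a = "daBcd", b = "ABC" -> True ("a" and "c" are upper-cased, both "d"s deleted).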
SCREAMING_SNAKE_CASE__ : Optional[int] =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple =[[False for _ in range(m + 1 )] for _ in range(n + 1 )]
SCREAMING_SNAKE_CASE__ : List[Any] =True
for i in range(UpperCamelCase__ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
SCREAMING_SNAKE_CASE__ : Optional[int] =True
if a[i].islower():
SCREAMING_SNAKE_CASE__ : List[Any] =True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
def _a( UpperCamelCase__ : List[Any] ):
'''simple docstring'''
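# Normalise the input into a batch of videos, each a list of frame images.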
if isinstance(UpperCamelCase__, (list, tuple) ) and isinstance(videos[0], (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(UpperCamelCase__, (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(UpperCamelCase__ ):
return [[videos]]
raise ValueError(f"Could not make batched video from {videos}" )
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = ["""pixel_values"""]
def __init__( self : int , __lowercase : bool = True , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = PILImageResampling.BILINEAR , __lowercase : bool = True , __lowercase : Dict[str, int] = None , __lowercase : bool = True , __lowercase : Union[int, float] = 1 / 2_55 , __lowercase : bool = True , __lowercase : bool = True , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , **__lowercase : Optional[int] , ) -> None:
super().__init__(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =size if size is not None else {'''shortest_edge''': 2_56}
SCREAMING_SNAKE_CASE__ : str =get_size_dict(__lowercase , default_to_square=__lowercase )
SCREAMING_SNAKE_CASE__ : str =crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
SCREAMING_SNAKE_CASE__ : str =get_size_dict(__lowercase , param_name='''crop_size''' )
SCREAMING_SNAKE_CASE__ : int =do_resize
SCREAMING_SNAKE_CASE__ : Tuple =size
SCREAMING_SNAKE_CASE__ : int =do_center_crop
SCREAMING_SNAKE_CASE__ : Dict =crop_size
SCREAMING_SNAKE_CASE__ : Tuple =resample
SCREAMING_SNAKE_CASE__ : List[Any] =do_rescale
SCREAMING_SNAKE_CASE__ : Optional[Any] =rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[Any] =offset
SCREAMING_SNAKE_CASE__ : Tuple =do_normalize
SCREAMING_SNAKE_CASE__ : List[str] =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : List[Any] =image_std if image_std is not None else IMAGENET_STANDARD_STD
def __magic_name__ ( self : Optional[Any] , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : PILImageResampling = PILImageResampling.BILINEAR , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : str , ) -> np.ndarray:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =get_size_dict(__lowercase , default_to_square=__lowercase )
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE__ : Optional[int] =get_resize_output_image_size(__lowercase , size['''shortest_edge'''] , default_to_square=__lowercase )
elif "height" in size and "width" in size:
SCREAMING_SNAKE_CASE__ : Any =(size['''height'''], size['''width'''])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
def __magic_name__ ( self : Optional[Any] , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : List[Any] , ) -> np.ndarray:
SCREAMING_SNAKE_CASE__ : Any =get_size_dict(__lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(__lowercase , size=(size['''height'''], size['''width''']) , data_format=__lowercase , **__lowercase )
def __magic_name__ ( self : Tuple , __lowercase : np.ndarray , __lowercase : Union[int, float] , __lowercase : bool = True , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Optional[int] , ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str =image.astype(np.floataa )
if offset:
SCREAMING_SNAKE_CASE__ : int =image - (scale / 2)
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def __magic_name__ ( self : Optional[Any] , __lowercase : np.ndarray , __lowercase : Union[float, List[float]] , __lowercase : Union[float, List[float]] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Any , ) -> np.ndarray:
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def __magic_name__ ( self : Dict , __lowercase : ImageInput , __lowercase : bool = None , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = None , __lowercase : bool = None , __lowercase : Dict[str, int] = None , __lowercase : bool = None , __lowercase : float = None , __lowercase : bool = None , __lowercase : bool = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : List[str] =to_numpy_array(__lowercase )
if do_resize:
SCREAMING_SNAKE_CASE__ : str =self.resize(image=__lowercase , size=__lowercase , resample=__lowercase )
if do_center_crop:
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.center_crop(__lowercase , size=__lowercase )
if do_rescale:
SCREAMING_SNAKE_CASE__ : List[str] =self.rescale(image=__lowercase , scale=__lowercase , offset=__lowercase )
if do_normalize:
SCREAMING_SNAKE_CASE__ : int =self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =to_channel_dimension_format(__lowercase , __lowercase )
return image
def __magic_name__ ( self : Any , __lowercase : ImageInput , __lowercase : bool = None , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = None , __lowercase : bool = None , __lowercase : Dict[str, int] = None , __lowercase : bool = None , __lowercase : float = None , __lowercase : bool = None , __lowercase : bool = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : ChannelDimension = ChannelDimension.FIRST , **__lowercase : Dict , ) -> PIL.Image.Image:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : List[str] =resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : str =do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : Dict =do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : Optional[Any] =rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Any =offset if offset is not None else self.offset
SCREAMING_SNAKE_CASE__ : List[Any] =do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Optional[Any] =image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Optional[int] =size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : str =get_size_dict(__lowercase , default_to_square=__lowercase )
SCREAMING_SNAKE_CASE__ : str =crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : Optional[int] =get_size_dict(__lowercase , param_name='''crop_size''' )
if not valid_images(__lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] =make_batched(__lowercase )
SCREAMING_SNAKE_CASE__ : int =[
[
self._preprocess_image(
image=__lowercase , do_resize=__lowercase , size=__lowercase , resample=__lowercase , do_center_crop=__lowercase , crop_size=__lowercase , do_rescale=__lowercase , rescale_factor=__lowercase , offset=__lowercase , do_normalize=__lowercase , image_mean=__lowercase , image_std=__lowercase , data_format=__lowercase , )
for img in video
]
for video in videos
]
SCREAMING_SNAKE_CASE__ : Tuple ={'''pixel_values''': videos}
return BatchFeature(data=__lowercase , tensor_type=__lowercase )
| 665 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
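# Model tester: builds a tiny MegatronBert config and random inputs for the tests below.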
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , __lowercase : Optional[Any] , __lowercase : List[str]=13 , __lowercase : int=7 , __lowercase : Tuple=True , __lowercase : Optional[Any]=True , __lowercase : List[Any]=True , __lowercase : str=True , __lowercase : Tuple=99 , __lowercase : Union[str, Any]=64 , __lowercase : Tuple=32 , __lowercase : Optional[Any]=5 , __lowercase : Tuple=4 , __lowercase : Optional[int]=37 , __lowercase : int="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : List[Any]=0.1 , __lowercase : List[Any]=5_12 , __lowercase : int=16 , __lowercase : Optional[int]=2 , __lowercase : Tuple=0.02 , __lowercase : List[str]=3 , __lowercase : List[str]=4 , __lowercase : List[str]=None , ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =parent
SCREAMING_SNAKE_CASE__ : Any =batch_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =seq_length
SCREAMING_SNAKE_CASE__ : Dict =is_training
SCREAMING_SNAKE_CASE__ : List[Any] =use_input_mask
SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_token_type_ids
SCREAMING_SNAKE_CASE__ : List[Any] =use_labels
SCREAMING_SNAKE_CASE__ : int =vocab_size
SCREAMING_SNAKE_CASE__ : str =hidden_size
SCREAMING_SNAKE_CASE__ : Any =embedding_size
SCREAMING_SNAKE_CASE__ : Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE__ : str =num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : str =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Any =max_position_embeddings
SCREAMING_SNAKE_CASE__ : Optional[Any] =type_vocab_size
SCREAMING_SNAKE_CASE__ : Any =type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =initializer_range
SCREAMING_SNAKE_CASE__ : str =num_labels
SCREAMING_SNAKE_CASE__ : List[str] =num_choices
SCREAMING_SNAKE_CASE__ : List[str] =scope
def __magic_name__ ( self : str ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : int =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : int =None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =None
SCREAMING_SNAKE_CASE__ : Optional[Any] =None
SCREAMING_SNAKE_CASE__ : Optional[int] =None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Tuple =ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self : List[str] ) -> Any:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
def __magic_name__ ( self : Any , __lowercase : Tuple , __lowercase : Any , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : Dict , __lowercase : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertModel(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(__lowercase , token_type_ids=__lowercase )
SCREAMING_SNAKE_CASE__ : str =model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__ ( self : Dict , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : Any , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : str =MegatronBertForMaskedLM(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : List[str] , __lowercase : Tuple , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : str , __lowercase : Any , __lowercase : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =MegatronBertForNextSentencePrediction(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : List[str] , __lowercase : Optional[int] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[Any] , __lowercase : str , __lowercase : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForPreTraining(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , next_sentence_label=__lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : Optional[int] , __lowercase : Tuple , __lowercase : Optional[int] , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any , __lowercase : Any , __lowercase : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] =MegatronBertForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : int , __lowercase : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.num_labels
SCREAMING_SNAKE_CASE__ : Dict =MegatronBertForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : int ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Tuple =self.num_labels
SCREAMING_SNAKE_CASE__ : int =MegatronBertForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : Dict , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Any , __lowercase : List[Any] , __lowercase : int , __lowercase : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : int =self.num_choices
SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertForMultipleChoice(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[int] =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Tuple =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self : str ) -> Any:
SCREAMING_SNAKE_CASE__ : Tuple =self.prepare_config_and_inputs()
( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) : str =config_and_inputs
SCREAMING_SNAKE_CASE__ : Union[str, Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
snake_case_ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = True
# test_resize_embeddings = False
snake_case_ = False
def __magic_name__ ( self : List[Any] , __lowercase : List[str] , __lowercase : Tuple , __lowercase : Tuple=False ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
if return_labels:
if model_class in get_values(__lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] =torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
return inputs_dict
def __magic_name__ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =MegatronBertModelTester(self )
SCREAMING_SNAKE_CASE__ : Tuple =ConfigTester(self , config_class=__lowercase , hidden_size=37 )
def __magic_name__ ( self : str ) -> Dict:
self.config_tester.run_common_tests()
def __magic_name__ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__lowercase )
def __magic_name__ ( self : List[str] ) -> Any:
SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__lowercase )
def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__lowercase )
def __magic_name__ ( self : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__lowercase )
def __magic_name__ ( self : Dict ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__lowercase )
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__lowercase )
def __magic_name__ ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__lowercase )
def _a( UpperCamelCase__ : List[str] ):
'''simple docstring'''
return torch.tensor(
UpperCamelCase__, dtype=torch.long, device=UpperCamelCase__, )
a_ = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
@unittest.skip('''Model is not available.''' )
def __magic_name__ ( self : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Any ='''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
SCREAMING_SNAKE_CASE__ : Optional[int] =os.path.join(os.environ['''MYDIR'''] , __lowercase )
SCREAMING_SNAKE_CASE__ : Any =MegatronBertModel.from_pretrained(__lowercase )
model.to(__lowercase )
model.half()
SCREAMING_SNAKE_CASE__ : Dict =_long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase )[0]
SCREAMING_SNAKE_CASE__ : Dict =torch.Size((1, 9, 10_24) )
self.assertEqual(output.shape , __lowercase )
SCREAMING_SNAKE_CASE__ : str =[-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
SCREAMING_SNAKE_CASE__ : List[Any] =output[0, ii, jj]
SCREAMING_SNAKE_CASE__ : Tuple =expected[3 * ii + jj]
SCREAMING_SNAKE_CASE__ : List[str] ='''ii={} jj={} a={} b={}'''.format(__lowercase , __lowercase , __lowercase , __lowercase )
self.assertTrue(math.isclose(__lowercase , __lowercase , rel_tol=__lowercase , abs_tol=__lowercase ) , msg=__lowercase )
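# A de-obfuscated sketch of the elementwise check in the integration test above:
# each entry of a 3x3 logits slice is compared to a hard-coded reference with both
# relative and absolute tolerance. Names are assumptions; TOLERANCE mirrors the
# 1E-4 module constant defined earlier.
import math

TOLERANCE = 1e-4
output = [[-0.6040, -0.2517, -0.1025], [0.3420, -0.6758, -0.0017], [-0.1089, -0.1990, 0.5728]]
expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3):
    for jj in range(3):
        a, b = output[ii][jj], expected[3 * ii + jj]
        assert math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), f"ii={ii} jj={jj} a={a} b={b}"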
| 665 | 1 |
'''simple docstring'''
from manim import *
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
def __magic_name__ ( self : int ) -> Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE__ : int =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE__ : Optional[int] =Rectangle(height=0.25 , width=0.25 )
SCREAMING_SNAKE_CASE__ : int =[mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : Tuple =VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : List[Any] =VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : int =VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : str =Text('''CPU''' , font_size=24 )
SCREAMING_SNAKE_CASE__ : Any =Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =[mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE__ : Dict =VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =Text('''GPU''' , font_size=24 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
gpu.move_to([-1, -1, 0] )
self.add(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =[mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : Dict =VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =Text('''Model''' , font_size=24 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
model.move_to([3, -1.0, 0] )
self.add(__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =[]
SCREAMING_SNAKE_CASE__ : Tuple =[]
for i, rect in enumerate(__lowercase ):
SCREAMING_SNAKE_CASE__ : List[Any] =fill.copy().set_fill(__lowercase , opacity=0.8 )
target.move_to(__lowercase )
model_arr.append(__lowercase )
SCREAMING_SNAKE_CASE__ : str =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__lowercase )
self.add(*__lowercase , *__lowercase )
SCREAMING_SNAKE_CASE__ : int =[meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : List[Any] =[meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : Optional[Any] =VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : Optional[int] =VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : List[str] =VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
SCREAMING_SNAKE_CASE__ : int =Text('''Disk''' , font_size=24 )
SCREAMING_SNAKE_CASE__ : Any =Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
disk.move_to([-4, -1.25, 0] )
self.add(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Any =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__lowercase )
SCREAMING_SNAKE_CASE__ : str =MarkupText(
F"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =Square(0.3 )
input.set_fill(__lowercase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , __lowercase , buff=0.5 )
self.play(Write(__lowercase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=__lowercase , buff=0.02 )
self.play(MoveToTarget(__lowercase ) )
self.play(FadeOut(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] =Arrow(start=__lowercase , end=__lowercase , color=__lowercase , buff=0.5 )
a.next_to(model_arr[0].get_left() , __lowercase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
SCREAMING_SNAKE_CASE__ : Any =MarkupText(
F"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase , run_time=3 ) )
SCREAMING_SNAKE_CASE__ : Tuple ={'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02}
self.play(
Write(__lowercase ) , Circumscribe(model_arr[0] , color=__lowercase , **__lowercase ) , Circumscribe(model_cpu_arr[0] , color=__lowercase , **__lowercase ) , Circumscribe(gpu_rect[0] , color=__lowercase , **__lowercase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] =a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , __lowercase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =AnimationGroup(
FadeOut(__lowercase , run_time=0.5 ) , MoveToTarget(__lowercase , run_time=0.5 ) , FadeIn(__lowercase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(__lowercase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
SCREAMING_SNAKE_CASE__ : Optional[int] =0.7
self.play(
Circumscribe(model_arr[i] , **__lowercase ) , Circumscribe(cpu_left_col_base[i] , **__lowercase ) , Circumscribe(cpu_left_col_base[i + 1] , color=__lowercase , **__lowercase ) , Circumscribe(gpu_rect[0] , color=__lowercase , **__lowercase ) , Circumscribe(model_arr[i + 1] , color=__lowercase , **__lowercase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=__lowercase , **__lowercase ) , Circumscribe(cpu_left_col_base[-1] , color=__lowercase , **__lowercase ) , Circumscribe(gpu_rect[0] , color=__lowercase , **__lowercase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
SCREAMING_SNAKE_CASE__ : List[Any] =a_c
SCREAMING_SNAKE_CASE__ : Union[str, Any] =a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(__lowercase ) , FadeOut(__lowercase , run_time=0.5 ) , )
SCREAMING_SNAKE_CASE__ : str =MarkupText(F"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase , run_time=3 ) , MoveToTarget(__lowercase ) )
self.wait()
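# Rendering note (file and scene names below are illustrative assumptions): a manim
# Scene subclass like the one above is typically rendered from the command line, e.g.
#   manim -pql big_model_inference.py MyScene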
| 665 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE__ : Optional[Any] =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
SCREAMING_SNAKE_CASE__ : Dict =os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def __magic_name__ ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Any =F"\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n ".split()
SCREAMING_SNAKE_CASE__ : List[str] =[sys.executable] + distributed_args
execute_subprocess_async(__lowercase , env=os.environ.copy() )
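# Sketch of the command the test above assembles before passing it to
# execute_subprocess_async (paths are illustrative):
#   python <test_dir>/xla_spawn.py --num_cores 8 <mod_dir>/scripts/test_script.py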
| 665 | 1 |
'''simple docstring'''
def _a( UpperCamelCase__ : dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : set[int] =set()
# To detect a back edge, keep track of vertices currently in the recursion stack
SCREAMING_SNAKE_CASE__ : set[int] =set()
return any(
node not in visited and depth_first_search(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
for node in graph )
def _a( UpperCamelCase__ : dict, UpperCamelCase__ : int, UpperCamelCase__ : set, UpperCamelCase__ : set ):
'''simple docstring'''
visited.add(UpperCamelCase__ )
rec_stk.add(UpperCamelCase__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(UpperCamelCase__ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
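# De-obfuscated sketch of the cycle check above (all names are assumptions): a
# directed graph contains a cycle iff DFS ever reaches a node that is still on the
# recursion stack (a back edge).
def has_cycle(graph: dict) -> bool:
    visited: set = set()
    rec_stack: set = set()

    def dfs(vertex) -> bool:
        visited.add(vertex)
        rec_stack.add(vertex)
        for neighbour in graph[vertex]:
            if neighbour not in visited and dfs(neighbour):
                return True
            if neighbour in rec_stack:  # back edge -> cycle
                return True
        rec_stack.remove(vertex)  # done exploring this branch
        return False

    return any(node not in visited and dfs(node) for node in graph)

assert has_cycle({0: [1], 1: [2], 2: [0]}) is True
assert has_cycle({0: [1], 1: [2], 2: []}) is False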
| 665 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = ShapEImgaImgPipeline
snake_case_ = ["""image"""]
snake_case_ = ["""image"""]
snake_case_ = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
snake_case_ = False
@property
def __magic_name__ ( self : List[Any] ) -> List[Any]:
return 32
@property
def __magic_name__ ( self : List[str] ) -> Optional[int]:
return 32
@property
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Dict ) -> Union[str, Any]:
return 8
@property
def __magic_name__ ( self : Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
SCREAMING_SNAKE_CASE__ : str =CLIPVisionModel(__lowercase )
return model
@property
def __magic_name__ ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ : int =CLIPImageProcessor(
crop_size=2_24 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , )
return image_processor
@property
def __magic_name__ ( self : List[str] ) -> Dict:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : str ={
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
SCREAMING_SNAKE_CASE__ : str =PriorTransformer(**__lowercase )
return model
@property
def __magic_name__ ( self : Tuple ) -> List[str]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE__ : str =ShapERenderer(**__lowercase )
return model
def __magic_name__ ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int =self.dummy_prior
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_encoder
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_processor
SCREAMING_SNAKE_CASE__ : Tuple =self.dummy_renderer
SCREAMING_SNAKE_CASE__ : int =HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=__lowercase , clip_sample=__lowercase , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE__ : Any ={
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __magic_name__ ( self : Any , __lowercase : List[str] , __lowercase : Any=0 ) -> Any:
SCREAMING_SNAKE_CASE__ : int =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
if str(__lowercase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ : List[str] =torch.manual_seed(__lowercase )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
SCREAMING_SNAKE_CASE__ : Any ={
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def __magic_name__ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ : int ='''cpu'''
SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : str =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Any =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =output.images[0]
SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : List[Any] =np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self : List[Any] ) -> List[str]:
# NOTE: larger batch sizes cause this test to time out, so only test smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __magic_name__ ( self : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch_device == '''cpu'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__lowercase , relax_max_difference=__lowercase , )
def __magic_name__ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =1
SCREAMING_SNAKE_CASE__ : List[str] =2
SCREAMING_SNAKE_CASE__ : Dict =self.get_dummy_inputs(__lowercase )
for key in inputs.keys():
if key in self.batch_params:
SCREAMING_SNAKE_CASE__ : Tuple =batch_size * [inputs[key]]
SCREAMING_SNAKE_CASE__ : List[Any] =pipe(**__lowercase , num_images_per_prompt=__lowercase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : Optional[Any] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : List[str] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
SCREAMING_SNAKE_CASE__ : Dict =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
SCREAMING_SNAKE_CASE__ : List[Any] =ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
SCREAMING_SNAKE_CASE__ : Tuple =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple =pipe(
__lowercase , generator=__lowercase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__lowercase , __lowercase )
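# Pattern note for the last test above: every input named in `batch_params` is
# replicated batch_size times, the pipeline runs once with num_images_per_prompt
# set, and the output's leading dimension must equal
# batch_size * num_images_per_prompt.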
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
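# Behaviour note: _LazyModule defers the heavy torch/TF imports declared above until
# the corresponding attribute is first accessed, so importing the package stays cheap
# when only the config or tokenizer is needed.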
| 665 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """gpt_bigcode"""
snake_case_ = ["""past_key_values"""]
snake_case_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Any , __lowercase : Any=5_02_57 , __lowercase : int=10_24 , __lowercase : List[str]=7_68 , __lowercase : Optional[int]=12 , __lowercase : Dict=12 , __lowercase : List[str]=None , __lowercase : int="gelu_pytorch_tanh" , __lowercase : Union[str, Any]=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Optional[Any]=1e-5 , __lowercase : List[str]=0.02 , __lowercase : Tuple=True , __lowercase : Optional[Any]=True , __lowercase : Union[str, Any]=5_02_56 , __lowercase : List[Any]=5_02_56 , __lowercase : Union[str, Any]=True , __lowercase : List[str]=True , __lowercase : Dict=True , **__lowercase : List[Any] , ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =n_positions
SCREAMING_SNAKE_CASE__ : Dict =n_embd
SCREAMING_SNAKE_CASE__ : Dict =n_layer
SCREAMING_SNAKE_CASE__ : Union[str, Any] =n_head
SCREAMING_SNAKE_CASE__ : List[str] =n_inner
SCREAMING_SNAKE_CASE__ : List[str] =activation_function
SCREAMING_SNAKE_CASE__ : List[Any] =resid_pdrop
SCREAMING_SNAKE_CASE__ : List[Any] =embd_pdrop
SCREAMING_SNAKE_CASE__ : List[str] =attn_pdrop
SCREAMING_SNAKE_CASE__ : Dict =layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : List[str] =initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] =scale_attn_weights
SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_cache
SCREAMING_SNAKE_CASE__ : Dict =attention_softmax_in_fpaa
SCREAMING_SNAKE_CASE__ : int =scale_attention_softmax_in_fpaa
SCREAMING_SNAKE_CASE__ : Dict =multi_query
SCREAMING_SNAKE_CASE__ : Optional[Any] =bos_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] =eos_token_id
super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
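# Hedged usage sketch (assumes the class is exported as GPTBigCodeConfig): the
# attribute_map above lets the generic name write through to the model-specific field.
from transformers import GPTBigCodeConfig

cfg = GPTBigCodeConfig(hidden_size=1024)
assert cfg.n_embd == 1024
assert cfg.model_type == "gpt_bigcode"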
| 665 | 1 |
'''simple docstring'''
import qiskit
def _a( UpperCamelCase__ : int, UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple =qiskit.Aer.get_backend('''aer_simulator''' )
# Create a Quantum Circuit acting on the q register
SCREAMING_SNAKE_CASE__ : Optional[int] =qiskit.QuantumCircuit(UpperCamelCase__, UpperCamelCase__ )
# Map the quantum measurement to the classical bits
circuit.measure([0], [0] )
# Execute the circuit on the simulator
SCREAMING_SNAKE_CASE__ : int =qiskit.execute(UpperCamelCase__, UpperCamelCase__, shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(UpperCamelCase__ )
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
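# With no gates applied before the measurement, the qubit never leaves |0>, so the
# histogram is deterministic and the script prints:
#   Total count for various states are: {'0': 1000}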
| 665 |
'''simple docstring'''
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , __lowercase : int ) -> None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =size
SCREAMING_SNAKE_CASE__ : List[Any] =[0] * size
SCREAMING_SNAKE_CASE__ : str =[0] * size
@staticmethod
def __magic_name__ ( __lowercase : int ) -> int:
return index | (index + 1)
@staticmethod
def __magic_name__ ( __lowercase : int ) -> int:
return (index & (index + 1)) - 1
def __magic_name__ ( self : Dict , __lowercase : int , __lowercase : int ) -> None:
SCREAMING_SNAKE_CASE__ : List[str] =value
while index < self.size:
SCREAMING_SNAKE_CASE__ : Any =self.get_prev(__lowercase ) + 1
if current_left_border == index:
SCREAMING_SNAKE_CASE__ : List[str] =value
else:
SCREAMING_SNAKE_CASE__ : str =max(__lowercase , __lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.get_next(__lowercase )
def __magic_name__ ( self : Optional[int] , __lowercase : int , __lowercase : int ) -> int:
right -= 1 # Because of right is exclusive
SCREAMING_SNAKE_CASE__ : str =0
while left <= right:
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.get_prev(__lowercase )
if left <= current_left:
SCREAMING_SNAKE_CASE__ : List[Any] =max(__lowercase , self.tree[right] )
SCREAMING_SNAKE_CASE__ : Any =current_left
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =max(__lowercase , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
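# Usage sketch for the structure above (de-obfuscated names are assumptions): point
# assignment via update(index, value), max query over the half-open range [left, right):
#   tree = MaxFenwickTree(5)
#   tree.update(2, 7)
#   tree.update(4, 3)
#   tree.query(0, 5)  # -> 7
#   tree.query(3, 5)  # -> 3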
| 665 | 1 |
'''simple docstring'''
# Uses DFS to find an Eulerian path or cycle traversal.
def _a( UpperCamelCase__ : int, UpperCamelCase__ : str, UpperCamelCase__ : str, UpperCamelCase__ : Any=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =True, True
SCREAMING_SNAKE_CASE__ : Optional[Any] =dfs(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
return path
def _a( UpperCamelCase__ : List[str], UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =0
SCREAMING_SNAKE_CASE__ : Dict =-1
for i in range(UpperCamelCase__ ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
SCREAMING_SNAKE_CASE__ : List[str] =i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def _a( UpperCamelCase__ : List[str], UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple =[[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict =check_circuit_or_path(UpperCamelCase__, UpperCamelCase__ )
if check == 3:
print('''graph is not Eulerian''' )
print('''no path''' )
return
SCREAMING_SNAKE_CASE__ : Optional[int] =1
if check == 2:
SCREAMING_SNAKE_CASE__ : Optional[Any] =odd_node
print('''graph has a Euler path''' )
if check == 1:
print('''graph has a Euler cycle''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =dfs(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
print(UpperCamelCase__ )
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] ={1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
SCREAMING_SNAKE_CASE__ : Optional[Any] ={1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
SCREAMING_SNAKE_CASE__ : List[str] ={1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
SCREAMING_SNAKE_CASE__ : List[Any] ={1: [2, 3], 2: [1, 3], 3: [1, 2]}
SCREAMING_SNAKE_CASE__ : Optional[int] ={
1: [],
2: []
# all degrees are zero
}
SCREAMING_SNAKE_CASE__ : int =1_0
check_euler(UpperCamelCase__, UpperCamelCase__ )
check_euler(UpperCamelCase__, UpperCamelCase__ )
check_euler(UpperCamelCase__, UpperCamelCase__ )
check_euler(UpperCamelCase__, UpperCamelCase__ )
check_euler(UpperCamelCase__, UpperCamelCase__ )
if __name__ == "__main__":
main()
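# Degree-parity rule used by the path/circuit check above: a connected undirected
# graph has an Euler cycle iff every vertex has even degree, and an Euler path iff
# exactly two vertices have odd degree (the traversal must start at one of them).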
| 665 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = ["""vqvae"""]
def __init__( self : int , __lowercase : AutoencoderKL , __lowercase : UNetaDConditionModel , __lowercase : Mel , __lowercase : Union[DDIMScheduler, DDPMScheduler] , ) -> int:
super().__init__()
self.register_modules(unet=__lowercase , scheduler=__lowercase , mel=__lowercase , vqvae=__lowercase )
def __magic_name__ ( self : List[str] ) -> int:
return 50 if isinstance(self.scheduler , __lowercase ) else 10_00
@torch.no_grad()
def __call__( self : Dict , __lowercase : int = 1 , __lowercase : str = None , __lowercase : np.ndarray = None , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = None , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : float = 0 , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : torch.Tensor = None , __lowercase : torch.Tensor = None , __lowercase : Dict=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =steps or self.get_default_steps()
self.scheduler.set_timesteps(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
SCREAMING_SNAKE_CASE__ : Optional[int] =randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__lowercase , device=self.device , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =noise
SCREAMING_SNAKE_CASE__ : List[str] =None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Dict =self.mel.audio_slice_to_image(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
SCREAMING_SNAKE_CASE__ : int =(input_image / 2_55) * 2 - 1
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.vqvae.encode(torch.unsqueeze(__lowercase , 0 ) ).latent_dist.sample(
generator=__lowercase )[0]
SCREAMING_SNAKE_CASE__ : int =self.vqvae.config.scaling_factor * input_images
if start_step > 0:
SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , self.scheduler.timesteps[start_step - 1] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
SCREAMING_SNAKE_CASE__ : Optional[Any] =int(mask_start_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : Tuple =int(mask_end_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __lowercase ):
SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase , __lowercase )['''sample''']
else:
SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase )['''sample''']
if isinstance(self.scheduler , __lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.scheduler.step(
model_output=__lowercase , timestep=__lowercase , sample=__lowercase , eta=__lowercase , generator=__lowercase , )['''prev_sample''']
else:
SCREAMING_SNAKE_CASE__ : Any =self.scheduler.step(
model_output=__lowercase , timestep=__lowercase , sample=__lowercase , generator=__lowercase , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] =mask[:, step, :, :mask_start]
if mask_end > 0:
SCREAMING_SNAKE_CASE__ : List[str] =mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was the scaling factor used in training to ensure unit variance
SCREAMING_SNAKE_CASE__ : str =1 / self.vqvae.config.scaling_factor * images
SCREAMING_SNAKE_CASE__ : int =self.vqvae.decode(__lowercase )['''sample''']
SCREAMING_SNAKE_CASE__ : List[str] =(images / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
SCREAMING_SNAKE_CASE__ : Dict =(images * 2_55).round().astype('''uint8''' )
SCREAMING_SNAKE_CASE__ : Any =list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(__lowercase , mode='''RGB''' ).convert('''L''' ) for _ in images) )
SCREAMING_SNAKE_CASE__ : Optional[int] =[self.mel.image_to_audio(__lowercase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowercase ) )
@torch.no_grad()
def __magic_name__ ( self : Optional[int] , __lowercase : List[Image.Image] , __lowercase : int = 50 ) -> np.ndarray:
assert isinstance(self.scheduler , __lowercase )
self.scheduler.set_timesteps(__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
SCREAMING_SNAKE_CASE__ : str =(sample / 2_55) * 2 - 1
SCREAMING_SNAKE_CASE__ : str =torch.Tensor(__lowercase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
SCREAMING_SNAKE_CASE__ : Tuple =t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
SCREAMING_SNAKE_CASE__ : Any =self.scheduler.alphas_cumprod[t]
SCREAMING_SNAKE_CASE__ : int =(
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
SCREAMING_SNAKE_CASE__ : int =1 - alpha_prod_t
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.unet(__lowercase , __lowercase )['''sample''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(1 - alpha_prod_t_prev) ** 0.5 * model_output
SCREAMING_SNAKE_CASE__ : str =(sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
SCREAMING_SNAKE_CASE__ : Any =sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __magic_name__ ( __lowercase : torch.Tensor , __lowercase : torch.Tensor , __lowercase : float ) -> torch.Tensor:
SCREAMING_SNAKE_CASE__ : Optional[int] =acos(torch.dot(torch.flatten(__lowercase ) , torch.flatten(__lowercase ) ) / torch.norm(__lowercase ) / torch.norm(__lowercase ) )
return sin((1 - alpha) * theta ) * xa / sin(__lowercase ) + sin(alpha * theta ) * xa / sin(__lowercase )
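# De-obfuscated sketch of the static interpolation helper above (name assumed):
# spherical linear interpolation between two tensors treated as flat vectors.
# Assumes the inputs are not (anti)parallel, so sin(theta) != 0.
import torch
from math import acos, sin

def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    theta = acos(float(torch.dot(torch.flatten(x0), torch.flatten(x1)) / (torch.norm(x0) * torch.norm(x1))))
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)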
| 665 | 1 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _a( UpperCamelCase__ : str, UpperCamelCase__ : Tuple, UpperCamelCase__ : Any, UpperCamelCase__ : List[str], UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
with open(UpperCamelCase__ ) as metadata_file:
SCREAMING_SNAKE_CASE__ : Optional[int] =json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =LukeConfig(use_entity_aware_attention=UpperCamelCase__, **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.load(UpperCamelCase__, map_location='''cpu''' )['''module''']
# Load the entity vocab file
SCREAMING_SNAKE_CASE__ : List[str] =load_original_entity_vocab(UpperCamelCase__ )
# add an entry for [MASK2]
SCREAMING_SNAKE_CASE__ : Optional[int] =max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
SCREAMING_SNAKE_CASE__ : Optional[int] =XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE__ : List[Any] =AddedToken('''<ent>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any =AddedToken('''<ent2>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''r''' ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] =json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : int ='''MLukeTokenizer'''
with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''w''' ) as f:
json.dump(UpperCamelCase__, UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__, MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ), '''w''' ) as f:
json.dump(UpperCamelCase__, UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =MLukeTokenizer.from_pretrained(UpperCamelCase__ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE__ : str =tokenizer.convert_tokens_to_ids(['''@'''] )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.convert_tokens_to_ids(['''#'''] )[0]
SCREAMING_SNAKE_CASE__ : Dict =state_dict['''embeddings.word_embeddings.weight''']
SCREAMING_SNAKE_CASE__ : List[str] =word_emb[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Tuple =word_emb[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : str =torch.cat([word_emb, ent_emb, enta_emb] )
# add rows for the new special tokens to the LM head biases
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[bias_name]
SCREAMING_SNAKE_CASE__ : List[Any] =decoder_bias[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : str =decoder_bias[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : List[str] =torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE__ : Tuple =f"encoder.layer.{layer_index}.attention.self."
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : List[Any] =state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE__ : Any =state_dict['''entity_embeddings.entity_embeddings.weight''']
SCREAMING_SNAKE_CASE__ : Any =entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict['''entity_predictions.bias''']
SCREAMING_SNAKE_CASE__ : Tuple =entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_prediction_bias, entity_mask_bias] )
SCREAMING_SNAKE_CASE__ : int =LukeForMaskedLM(config=UpperCamelCase__ ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
SCREAMING_SNAKE_CASE__ : Tuple =OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[key]
else:
SCREAMING_SNAKE_CASE__ : Any =state_dict[key]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =model.load_state_dict(UpperCamelCase__, strict=UpperCamelCase__ )
if set(UpperCamelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" )
if set(UpperCamelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
SCREAMING_SNAKE_CASE__ : Any =MLukeTokenizer.from_pretrained(UpperCamelCase__, task='''entity_classification''' )
SCREAMING_SNAKE_CASE__ : str ='''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(0, 9)
SCREAMING_SNAKE_CASE__ : str =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : List[Any] =model(**UpperCamelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE__ : str =torch.Size((1, 3_3, 7_6_8) )
SCREAMING_SNAKE_CASE__ : int =torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE__ : Any =torch.Size((1, 1, 7_6_8) )
SCREAMING_SNAKE_CASE__ : int =torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
SCREAMING_SNAKE_CASE__ : str =MLukeTokenizer.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''Tokyo is the capital of <mask>.'''
SCREAMING_SNAKE_CASE__ : Dict =(2_4, 3_0)
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : List[str] =model(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] =encoding['''input_ids'''][0].tolist()
SCREAMING_SNAKE_CASE__ : Any =input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.entity_logits[0][0].argmax().item()
SCREAMING_SNAKE_CASE__ : Dict =[
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) )
model.save_pretrained(UpperCamelCase__ )
def _a( UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] =['''[MASK]''', '''[PAD]''', '''[UNK]''']
SCREAMING_SNAKE_CASE__ : List[str] =[json.loads(UpperCamelCase__ ) for line in open(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Optional[int] ={}
for entry in data:
SCREAMING_SNAKE_CASE__ : Tuple =entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
SCREAMING_SNAKE_CASE__ : str =entity_id
break
SCREAMING_SNAKE_CASE__ : Union[str, Any] =f"{language}:{entity_name}"
SCREAMING_SNAKE_CASE__ : Union[str, Any] =entity_id
return new_mapping
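# Sketch of the JSON-lines entity vocab the loader above expects (values illustrative):
#   {"id": 0, "entities": [["[PAD]", "en"]]}
#   {"id": 1, "entities": [["Japan", "en"], ["日本", "ja"]]}
# Special tokens keep their bare names; every other alias is keyed as "language:name".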
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 665 |
'''simple docstring'''
from math import isqrt
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =[True] * max_number
for i in range(2, isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2, UpperCamelCase__, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Any =False
return [i for i in range(2, UpperCamelCase__ ) if is_prime[i]]
def _a( UpperCamelCase__ : int = 1_0**8 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple =calculate_prime_numbers(max_number // 2 )
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : Optional[int] =len(UpperCamelCase__ ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
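# De-obfuscated sketch of the approach above (names are assumptions): sieve the
# primes below limit // 2, then count prime pairs (p, q), p <= q, with p * q < limit
# using a two-pointer sweep.
def count_semiprimes(limit: int) -> int:
    half = limit // 2
    sieve = [True] * half
    primes = []
    for i in range(2, half):
        if sieve[i]:
            primes.append(i)
            for j in range(i * i, half, i):
                sieve[j] = False
    count, left, right = 0, 0, len(primes) - 1
    while left <= right:
        while primes[left] * primes[right] >= limit:
            right -= 1
        count += right - left + 1
        left += 1
    return count

assert count_semiprimes(30) == 10  # 4, 6, 9, 10, 14, 15, 21, 22, 25, 26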
| 665 | 1 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
a_ = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
a_ = {
'allenai/longformer-base-4096': 4_0_9_6,
'allenai/longformer-large-4096': 4_0_9_6,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_0_9_6,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_0_9_6,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str =(
list(range(ord('''!''' ), ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ), ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ), ord('''ÿ''' ) + 1 ) )
)
SCREAMING_SNAKE_CASE__ : List[str] =bs[:]
SCREAMING_SNAKE_CASE__ : Any =0
for b in range(2**8 ):
if b not in bs:
bs.append(UpperCamelCase__ )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE__ : int =[chr(UpperCamelCase__ ) for n in cs]
return dict(zip(UpperCamelCase__, UpperCamelCase__ ) )
def _a( UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any =set()
SCREAMING_SNAKE_CASE__ : Optional[Any] =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE__ : int =char
return pairs
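# Example for the helper above: for the symbol tuple ('h', 'e', 'l', 'l', 'o'),
# get_pairs returns {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}.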
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : List[Any] , __lowercase : List[Any]="replace" , __lowercase : Tuple="<s>" , __lowercase : List[Any]="</s>" , __lowercase : Any="</s>" , __lowercase : Dict="<s>" , __lowercase : Dict="<unk>" , __lowercase : Optional[Any]="<pad>" , __lowercase : Optional[int]="<mask>" , __lowercase : Union[str, Any]=False , **__lowercase : str , ) -> Any:
SCREAMING_SNAKE_CASE__ : Optional[Any] =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else bos_token
SCREAMING_SNAKE_CASE__ : List[Any] =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else eos_token
SCREAMING_SNAKE_CASE__ : int =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else sep_token
SCREAMING_SNAKE_CASE__ : Union[str, Any] =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else cls_token
SCREAMING_SNAKE_CASE__ : Optional[Any] =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else unk_token
SCREAMING_SNAKE_CASE__ : List[Any] =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else pad_token
# The mask token behaves like a normal word, i.e. it includes the space before it
SCREAMING_SNAKE_CASE__ : Dict =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
super().__init__(
errors=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , add_prefix_space=__lowercase , **__lowercase , )
with open(__lowercase , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE__ : str =json.load(__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] ={v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE__ : Dict =errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE__ : Optional[int] =bytes_to_unicode()
SCREAMING_SNAKE_CASE__ : List[str] ={v: k for k, v in self.byte_encoder.items()}
with open(__lowercase , encoding='''utf-8''' ) as merges_handle:
SCREAMING_SNAKE_CASE__ : int =merges_handle.read().split('''\n''' )[1:-1]
SCREAMING_SNAKE_CASE__ : Optional[int] =[tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =dict(zip(__lowercase , range(len(__lowercase ) ) ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] ={}
SCREAMING_SNAKE_CASE__ : Optional[Any] =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE__ : Optional[Any] =re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)
    def get_vocab(self) -> dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8'))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index)
    def convert_tokens_to_string(self, tokens):
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
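# Minimal usage sketch for the byte-level BPE tokenizer above (illustrative,
# not from the original file; the vocab.json / merges.txt paths are
# hypothetical placeholders for a real Longformer checkpoint):
#
#   tokenizer = __SCREAMING_SNAKE_CASE('vocab.json', 'merges.txt')
#   ids = tokenizer.encode('Hello world')
#   assert tokenizer.decode(ids) == 'Hello world'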
| 665 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB)
        mask_token = AddedToken('<mask>', lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token})
        tokenizer.add_tokens(['<ctc_blank>'])
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = 'this is a test'
        output_text = 'this is a test'
        return input_text, output_text
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-4], 'œ')
        self.assertEqual(vocab_keys[-2], '<mask>')
        self.assertEqual(vocab_keys[-1], '<ctc_blank>')
        self.assertEqual(len(vocab_keys), 81)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)
                self.assertNotEqual(vocab_size, 0)
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ['aaaaa bbbbbb', 'cccccccccdddddddd']
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)
                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))
                tokens = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l', add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                new_toks_2 = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)
                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
                tokens = tokenizer.encode(
                    '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l', add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass
    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize('This is a test')
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6], )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
@slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
        expected_encoding = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='microsoft/speecht5_asr', revision='c5ef64c71905caeccde0e4462ef3f9077224c524', sequences=sequences, )
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_focalnet'] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
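# Illustrative note (not from the original file): with the _LazyModule pattern
# above, `import transformers` does not eagerly import the torch-backed
# modules; `modeling_focalnet` is only loaded on first attribute access, e.g.:
#
#   from transformers import FocalNetConfig   # cheap, config only
#   from transformers import FocalNetModel    # triggers the lazy torch import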
| 665 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_labels=10, initializer_range=0.02, attention_type="divided_space_time", scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels
        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, attention_type=self.attention_type, )
        config.num_labels = self.num_labels
        return config
    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
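# Worked example for the sequence-length arithmetic above, using this tester's
# defaults (comment only): image_size=10 and patch_size=2 give
# (10 // 2) ** 2 = 25 patches per frame; with num_frames=2 the model sees
# 2 * 25 + 1 = 51 tokens, the +1 being the CLS token.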
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict['labels'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='TimeSformer does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames
                inputs_dict['output_attentions'] = True
                inputs_dict['output_hidden_states'] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1], )
                out_len = len(outputs)
                # Check attention is always last and order is fine
                inputs_dict['output_attentions'] = True
                inputs_dict['output_hidden_states'] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1], )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    """Loads a short test clip from the Hub and returns it as a list of frames."""
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video', filename='eating_spaghetti.npy', repo_type='dataset')
    video = np.load(file)
    return list(video)
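# Illustrative usage (assumes the hf-internal-testing fixture is reachable):
# the helper returns the clip as a list of per-frame numpy arrays, which is
# the input format the video image processor below accepts.
#
#   video = prepare_video()
#   assert isinstance(video, list) and video[0].ndim == 3  # per-frame H x W x C (assumed layout)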
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400').to(
            torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
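# Follow-up sketch (illustrative, not part of the original test): mapping the
# verified logits to a human-readable Kinetics-400 label uses the config's
# id2label table:
#
#   predicted_class = outputs.logits.argmax(-1).item()
#   print(model.config.id2label[predicted_class])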
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('T')
class LRUCache(Generic[T]):
    dq_store: deque  # Cache store of keys
    key_reference: set  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache
    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.')
        else:
            LRUCache._MAX_CAPACITY = n
    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)
    def display(self) -> None:
        for k in self.dq_store:
            print(k)
def __repr__( self : List[Any] ) -> str:
return F"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
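# Design note (not from the original file): the class above tracks recency
# with an explicit deque plus a set for O(1) membership checks. For memoizing
# pure functions, the standard library offers the same eviction policy as a
# decorator:
#
#   from functools import lru_cache
#
#   @lru_cache(maxsize=4)
#   def fib(n: int) -> int:
#       return n if n < 2 else fib(n - 1) + fib(n - 2)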
| 665 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'bert-base-uncased': 512,
    'bert-large-uncased': 512,
    'bert-base-cased': 512,
    'bert-large-cased': 512,
    'bert-base-multilingual-uncased': 512,
    'bert-base-multilingual-cased': 512,
    'bert-base-chinese': 512,
    'bert-base-german-cased': 512,
    'bert-large-uncased-whole-word-masking': 512,
    'bert-large-cased-whole-word-masking': 512,
    'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
    'bert-large-cased-whole-word-masking-finetuned-squad': 512,
    'bert-base-cased-finetuned-mrpc': 512,
    'bert-base-german-dbmdz-cased': 512,
    'bert-base-german-dbmdz-uncased': 512,
    'TurkuNLP/bert-base-finnish-cased-v1': 512,
    'TurkuNLP/bert-base-finnish-uncased-v1': 512,
    'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
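# Minimal usage sketch (illustrative, not part of the original module):
#
#   tok = BertTokenizerFast.from_pretrained('bert-base-uncased')
#   enc = tok('first sentence', 'second sentence')
#   # enc['token_type_ids'] is 0 over "[CLS] first sentence [SEP]" and 1 over
#   # "second sentence [SEP]", exactly as computed by
#   # create_token_type_ids_from_sequences above.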
| 665 | 1 |
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    """Returns the sum of the perimeters of all matching triangles with perimeter at most max_perimeter."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F'''{solution() = }''')
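# Illustrative cross-check added for clarity (not part of the original file).
# It assumes the task is Project Euler 94: summing the perimeters of "almost
# equilateral" triangles with sides (a, a, a +/- 1) and integer area. By
# Heron's formula, 16 * area^2 == b * b * (4 * a * a - b * b) for sides
# (a, a, b), so integrality can be tested in exact integer arithmetic.
def _brute_force_solution(max_perimeter: int) -> int:
    from math import isqrt
    total = 0
    for a in range(2, max_perimeter // 3 + 2):
        for b in (a - 1, a + 1):
            perimeter = 2 * a + b
            d = 4 * a * a - b * b  # 16 * area^2 == b*b*d
            r = isqrt(d)
            if perimeter <= max_perimeter and r * r == d and b * r > 0 and (b * r) % 4 == 0:
                total += perimeter
    return total
# For small limits the two implementations should agree, e.g.
# _brute_force_solution(10**4) == solution(10**4).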
| 665 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace):
    """Factory function used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser('train', help='CLI tool to train a model on a task.')
        train_parser.add_argument(
            '--train_data', type=str, required=True, help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.', )
        train_parser.add_argument(
            '--column_label', type=int, default=0, help='Column of the dataset csv file with example labels.')
        train_parser.add_argument(
            '--column_text', type=int, default=1, help='Column of the dataset csv file with example texts.')
        train_parser.add_argument(
            '--column_id', type=int, default=2, help='Column of the dataset csv file with example ids.')
        train_parser.add_argument(
            '--skip_first_row', action='store_true', help='Skip the first row of the csv file (headers).')
        train_parser.add_argument('--validation_data', type=str, default='', help='path to validation dataset.')
        train_parser.add_argument(
            '--validation_split', type=float, default=0.1, help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.', )
        train_parser.add_argument('--output', type=str, default='./', help='path to saved the trained model.')
        train_parser.add_argument(
            '--task', type=str, default='text_classification', help='Task to train the model on.')
        train_parser.add_argument(
            '--model', type=str, default='bert-base-uncased', help="Model's name or path to stored model.")
        train_parser.add_argument('--train_batch_size', type=int, default=32, help='Batch size for training.')
        train_parser.add_argument('--valid_batch_size', type=int, default=64, help='Batch size for validation.')
        train_parser.add_argument('--learning_rate', type=float, default=3e-5, help='Learning rate.')
        train_parser.add_argument('--adam_epsilon', type=float, default=1e-08, help='Epsilon for Adam optimizer.')
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger('transformers-cli/training')
        self.framework = 'tf' if is_tf_available() else 'torch'
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()
    def run_tf(self):
        raise NotImplementedError
    def run_torch(self):
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
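# Example invocation sketch (illustrative; the csv path and layout are
# assumptions -- tab-separated labels and sentences, as described above):
#
#   transformers-cli train \
#       --train_data ./data/train.csv \
#       --task text_classification \
#       --model bert-base-uncased \
#       --train_batch_size 32 \
#       --output ./trained_model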
| 665 | 1 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text
    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
                    F"with: \"{text}\".")
                self.new_user_input = text
            else:
                logger.warning(
                    F"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
                    F"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input")
        else:
            self.new_user_input = text
    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None
    def append_response(self, response: str):
        self.generated_responses.append(response)
    def iter_texts(self):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Union[str, Any] ) -> Optional[int]:
        output = F"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = 'user' if is_user else 'bot'
output += F"{name} >> {text} \n"
return output
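# Minimal usage sketch for the Conversation container above (illustrative):
#
#   conversation = Conversation('Going to the movies tonight - any suggestions?')
#   conversation.mark_processed()                     # move input to history
#   conversation.append_response('The Big Lebowski')  # record the bot's reply
#   conversation.add_user_input('Is it an action movie?')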
@add_end_docstrings(
    PIPELINE_INIT_ARGS , R"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params['min_length_for_response'] = min_length_for_response
        if minimum_tokens is not None:
            forward_params['minimum_tokens'] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params['max_length'] = generate_kwargs['max_length']
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError('ConversationalPipeline, expects Conversation as inputs')
        if conversation.new_user_input is None:
            raise ValueError(
                F"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method")
        if hasattr(self.tokenizer, '_build_conversation_input_ids'):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}
def __magic_name__ ( self : List[Any] , __lowercase : str , __lowercase : Dict=10 , **__lowercase : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[Any] =generate_kwargs.get('''max_length''' , self.model.config.max_length )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model_inputs['''input_ids'''].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
SCREAMING_SNAKE_CASE__ : List[str] =max_length - minimum_tokens
SCREAMING_SNAKE_CASE__ : Dict =model_inputs['''input_ids'''][:, -trim:]
if "attention_mask" in model_inputs:
SCREAMING_SNAKE_CASE__ : Tuple =model_inputs['''attention_mask'''][:, -trim:]
SCREAMING_SNAKE_CASE__ : int =model_inputs.pop('''conversation''' )
SCREAMING_SNAKE_CASE__ : List[Any] =max_length
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model.generate(**__lowercase , **__lowercase )
if self.model.config.is_encoder_decoder:
SCREAMING_SNAKE_CASE__ : int =1
else:
SCREAMING_SNAKE_CASE__ : int =n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def __magic_name__ ( self : Dict , __lowercase : Optional[int] , __lowercase : Dict=True ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Optional[int] =model_outputs['''output_ids''']
SCREAMING_SNAKE_CASE__ : Optional[int] =self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__lowercase , clean_up_tokenization_spaces=__lowercase , )
SCREAMING_SNAKE_CASE__ : Optional[int] =model_outputs['''conversation''']
conversation.mark_processed()
conversation.append_response(__lowercase )
return conversation
def __magic_name__ ( self : str , __lowercase : Conversation ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.tokenizer.eos_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] =[]
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) )
if len(__lowercase ) > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =input_ids[-self.tokenizer.model_max_length :]
return input_ids
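# --- Editor's hedged usage sketch (not part of the original file) ---
# One round-trip through the pipeline above via the public `transformers` entry
# points; the model name is an illustrative assumption, not the author's choice.
from transformers import Conversation, pipeline

chatbot = pipeline('conversational', model='microsoft/DialoGPT-small')  # assumed model
conversation = Conversation('Can you recommend a good sci-fi movie?')
conversation = chatbot(conversation)  # runs preprocess -> _forward -> postprocess defined above
print(conversation.generated_responses[-1])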
| 665 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = KandinskyVaaImgaImgPipeline
snake_case_ = ["""image_embeds""", """negative_image_embeds""", """image"""]
snake_case_ = [
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
    snake_case_ = [
        """generator""",
        """height""",
        """width""",
        """strength""",
        """guidance_scale""",
        """num_inference_steps""",
        """return_dict""",
        """num_images_per_prompt""",
        """output_type""",
    ]
snake_case_ = False
@property
def __magic_name__ ( self : List[str] ) -> Tuple:
return 32
@property
def __magic_name__ ( self : List[str] ) -> str:
return 32
@property
def __magic_name__ ( self : Any ) -> Optional[int]:
return self.time_input_dim
@property
def __magic_name__ ( self : List[Any] ) -> int:
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Tuple ) -> Optional[int]:
return 1_00
@property
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''in_channels''': 4,
            # Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE__ : Optional[int] =UNetaDConditionModel(**__lowercase )
return model
@property
def __magic_name__ ( self : Dict ) -> Any:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __magic_name__ ( self : Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] =VQModel(**self.dummy_movq_kwargs )
return model
def __magic_name__ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[str] =self.dummy_unet
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_movq
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''num_train_timesteps''': 10_00,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE__ : str =DDIMScheduler(**__lowercase )
SCREAMING_SNAKE_CASE__ : Any ={
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __magic_name__ ( self : str , __lowercase : Optional[Any] , __lowercase : Any=0 ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowercase )
# create init_image
SCREAMING_SNAKE_CASE__ : Optional[Any] =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ : Any =Image.fromarray(np.uinta(__lowercase ) ).convert('''RGB''' ).resize((2_56, 2_56) )
if str(__lowercase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ : Dict =torch.manual_seed(__lowercase )
else:
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
SCREAMING_SNAKE_CASE__ : str ={
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __magic_name__ ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] ='''cpu'''
SCREAMING_SNAKE_CASE__ : Tuple =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =output.images
SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipe(
**self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : List[str] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : Tuple =np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : int ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : str =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE__ : List[Any] ='''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE__ : Optional[int] =KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =KandinskyVaaImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Dict =pipeline.to(__lowercase )
pipeline.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =pipe_prior(
__lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ : int =output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__lowercase , __lowercase )
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[str] , __lowercase : Any ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Any =data
SCREAMING_SNAKE_CASE__ : Node | None =None
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[str] =None
SCREAMING_SNAKE_CASE__ : List[str] =None
def __iter__( self : List[str] ) -> Iterator[Any]:
SCREAMING_SNAKE_CASE__ : Tuple =self.head
while self.head:
yield node.data
SCREAMING_SNAKE_CASE__ : List[Any] =node.next
if node == self.head:
break
def __len__( self : Tuple ) -> int:
return sum(1 for _ in self )
def __repr__( self : List[str] ) -> Tuple:
return "->".join(str(__lowercase ) for item in iter(self ) )
def __magic_name__ ( self : Tuple , __lowercase : Any ) -> None:
self.insert_nth(len(self ) , __lowercase )
def __magic_name__ ( self : Optional[int] , __lowercase : Any ) -> None:
self.insert_nth(0 , __lowercase )
def __magic_name__ ( self : Any , __lowercase : int , __lowercase : Any ) -> None:
if index < 0 or index > len(self ):
raise IndexError('''list index out of range.''' )
SCREAMING_SNAKE_CASE__ : str =Node(__lowercase )
if self.head is None:
            SCREAMING_SNAKE_CASE__ : int =new_node # first node points to itself
SCREAMING_SNAKE_CASE__ : str =new_node
elif index == 0: # insert at head
SCREAMING_SNAKE_CASE__ : Any =self.head
SCREAMING_SNAKE_CASE__ : str =new_node
else:
SCREAMING_SNAKE_CASE__ : Any =self.head
for _ in range(index - 1 ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =temp.next
SCREAMING_SNAKE_CASE__ : int =temp.next
SCREAMING_SNAKE_CASE__ : Optional[int] =new_node
if index == len(self ) - 1: # insert at tail
SCREAMING_SNAKE_CASE__ : Dict =new_node
def __magic_name__ ( self : List[str] ) -> Optional[Any]:
return self.delete_nth(0 )
def __magic_name__ ( self : List[Any] ) -> Any:
return self.delete_nth(len(self ) - 1 )
def __magic_name__ ( self : Any , __lowercase : int = 0 ) -> Any:
if not 0 <= index < len(self ):
raise IndexError('''list index out of range.''' )
SCREAMING_SNAKE_CASE__ : Dict =self.head
if self.head == self.tail: # just one node
SCREAMING_SNAKE_CASE__ : List[str] =None
elif index == 0: # delete head node
SCREAMING_SNAKE_CASE__ : str =self.tail.next.next
SCREAMING_SNAKE_CASE__ : Any =self.head.next
else:
SCREAMING_SNAKE_CASE__ : int =self.head
for _ in range(index - 1 ):
SCREAMING_SNAKE_CASE__ : List[Any] =temp.next
SCREAMING_SNAKE_CASE__ : Optional[Any] =temp.next
SCREAMING_SNAKE_CASE__ : Union[str, Any] =temp.next.next
if index == len(self ) - 1: # delete at tail
SCREAMING_SNAKE_CASE__ : Tuple =temp
return delete_node.data
def __magic_name__ ( self : Any ) -> bool:
return len(self ) == 0
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any =CircularLinkedList()
assert len(UpperCamelCase__ ) == 0
assert circular_linked_list.is_empty() is True
assert str(UpperCamelCase__ ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(UpperCamelCase__ ) == i
circular_linked_list.insert_nth(UpperCamelCase__, i + 1 )
assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1, 6 ) )
circular_linked_list.insert_tail(6 )
assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1, 7 ) )
circular_linked_list.insert_head(0 )
assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(0, 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1, 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2, 3 )
assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1, 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
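# Editor's note (addition): because the ring keeps no cached size, `__len__` walks
# every node, so `insert_nth`/`delete_nth` pay O(n) for the `len(self)` bounds
# check on top of the O(index) traversal; the test function above exercises the
# API end to end, including the empty-ring error paths.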
| 665 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
a_ = TypeVar('T')
class __SCREAMING_SNAKE_CASE ( Generic[T] ):
snake_case_ = 42 # Cache store of keys
snake_case_ = 42 # References of the keys in cache
snake_case_ = 10 # Maximum capacity of cache
def __init__( self : Dict , __lowercase : int ) -> None:
SCREAMING_SNAKE_CASE__ : Any =deque()
SCREAMING_SNAKE_CASE__ : str =set()
if not n:
SCREAMING_SNAKE_CASE__ : Optional[Any] =sys.maxsize
elif n < 0:
raise ValueError('''n should be an integer greater than 0.''' )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =n
def __magic_name__ ( self : List[str] , __lowercase : T ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
SCREAMING_SNAKE_CASE__ : int =self.dq_store.pop()
self.key_reference.remove(__lowercase )
else:
self.dq_store.remove(__lowercase )
self.dq_store.appendleft(__lowercase )
self.key_reference.add(__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> None:
for k in self.dq_store:
print(__lowercase )
def __repr__( self : List[Any] ) -> str:
return F"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
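# Editor's walk-through of the trace above (capacity 4, most recent key first):
# refer('A') -> ['A'];  refer(2) -> [2, 'A'];  refer(3) -> [3, 2, 'A']
# refer('A') hits, so 'A' moves to the front -> ['A', 3, 2]
# refer(4) -> [4, 'A', 3, 2];  refer(5) evicts the least recent key (2)
# -> [5, 4, 'A', 3], which is exactly what the final assert checks.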
| 665 | 1 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = SpeechTaTokenizer
snake_case_ = False
snake_case_ = True
def __magic_name__ ( self : int ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ : Optional[Any] =SpeechTaTokenizer(__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =AddedToken('''<mask>''' , lstrip=__lowercase , rstrip=__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self : Dict , __lowercase : int ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] ='''this is a test'''
SCREAMING_SNAKE_CASE__ : int ='''this is a test'''
return input_text, output_text
def __magic_name__ ( self : List[Any] , __lowercase : int , __lowercase : Optional[Any]=False , __lowercase : Union[str, Any]=20 , __lowercase : Any=5 ) -> Any:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.get_input_output_texts(__lowercase )
SCREAMING_SNAKE_CASE__ : str =tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
SCREAMING_SNAKE_CASE__ : str =tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
return text, ids
def __magic_name__ ( self : Dict ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[int] ='''<pad>'''
SCREAMING_SNAKE_CASE__ : Optional[int] =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def __magic_name__ ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-4] , '''œ''' )
self.assertEqual(vocab_keys[-2] , '''<mask>''' )
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
self.assertEqual(len(__lowercase ) , 81 )
def __magic_name__ ( self : Dict ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def __magic_name__ ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE__ : str =self.get_tokenizers(do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Any =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
SCREAMING_SNAKE_CASE__ : int =['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.add_tokens(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , len(__lowercase ) )
self.assertEqual(__lowercase , all_size + len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__lowercase )
self.assertGreaterEqual(len(__lowercase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
SCREAMING_SNAKE_CASE__ : str ={'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
SCREAMING_SNAKE_CASE__ : int =tokenizer.add_special_tokens(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : int =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , len(__lowercase ) )
self.assertEqual(__lowercase , all_size_a + len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__lowercase )
self.assertGreaterEqual(len(__lowercase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def __magic_name__ ( self : Optional[Any] ) -> Any:
pass
def __magic_name__ ( self : List[str] ) -> List[Any]:
pass
def __magic_name__ ( self : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Dict =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.tokenize('''This is a test''' )
# fmt: off
self.assertListEqual(__lowercase , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(__lowercase )
# fmt: off
self.assertListEqual(__lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.convert_ids_to_tokens(__lowercase )
self.assertListEqual(
__lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
def __magic_name__ ( self : List[str] ) -> List[str]:
# Use custom sequence because this tokenizer does not handle numbers.
SCREAMING_SNAKE_CASE__ : List[Any] =[
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
SCREAMING_SNAKE_CASE__ : str ={
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__lowercase , )
| 665 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
a_ = list[list[float | int]]
def _a( UpperCamelCase__ : Matrix, UpperCamelCase__ : Matrix ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(size + 1 )] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : float
for row in range(UpperCamelCase__ ):
for col in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Tuple =matrix[row][col]
SCREAMING_SNAKE_CASE__ : Optional[int] =vector[row][0]
SCREAMING_SNAKE_CASE__ : Any =0
SCREAMING_SNAKE_CASE__ : Union[str, Any] =0
while row < size and col < size:
# pivoting
SCREAMING_SNAKE_CASE__ : Any =max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCamelCase__, UpperCamelCase__ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[pivot_row], augmented[row]
for rowa in range(row + 1, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] =augmented[rowa][col] / augmented[row][col]
SCREAMING_SNAKE_CASE__ : Tuple =0
for cola in range(col + 1, size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1, UpperCamelCase__ ):
for row in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[row][col] / augmented[col][col]
for cola in range(UpperCamelCase__, size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row], 1_0 )] for row in range(UpperCamelCase__ )
]
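# Editor's worked check for the Gaussian-elimination solver above:
# for [[1, 2], [3, 4]] @ x = [[5], [6]], elimination then back substitution
# yields x = [[-4.0], [4.5]], and indeed -4 + 2*4.5 = 5 and 3*(-4) + 4*4.5 = 6.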
def _a( UpperCamelCase__ : list[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(UpperCamelCase__ )] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Matrix =[[0] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Matrix
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
for x_val, y_val in enumerate(UpperCamelCase__ ):
for col in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] =(x_val + 1) ** (size - col - 1)
SCREAMING_SNAKE_CASE__ : Dict =y_val
SCREAMING_SNAKE_CASE__ : Optional[int] =solve(UpperCamelCase__, UpperCamelCase__ )
def interpolated_func(UpperCamelCase__ : int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(UpperCamelCase__ ) )
return interpolated_func
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**1_0
)
def _a( UpperCamelCase__ : Callable[[int], int] = question_function, UpperCamelCase__ : int = 1_0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : list[int] =[func(UpperCamelCase__ ) for x_val in range(1, order + 1 )]
SCREAMING_SNAKE_CASE__ : list[Callable[[int], int]] =[
interpolate(data_points[:max_coeff] ) for max_coeff in range(1, order + 1 )
]
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : Callable[[int], int]
SCREAMING_SNAKE_CASE__ : int
for poly in polynomials:
SCREAMING_SNAKE_CASE__ : Any =1
while func(UpperCamelCase__ ) == poly(UpperCamelCase__ ):
x_val += 1
ret += poly(UpperCamelCase__ )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
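# Editor's worked example (from the Project Euler 101 statement, for intuition):
# for u(n) = n**3 the data points are 1, 8, 27, ...; OP(1, n) fits [1] as the
# constant 1, so its first incorrect term is OP(1, 2) = 1 (a FIT); OP(2, n)
# fits [1, 8] as 7n - 6, giving OP(2, 3) = 15 != 27 (FIT); OP(3, n) fits
# [1, 8, 27] as 6n**2 - 11n + 6, giving OP(3, 4) = 58 != 64 (FIT).
# solution() sums the analogous FITs for the degree-10 polynomial defined above.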
| 665 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def _a( UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =torch.load(UpperCamelCase__, map_location='''cpu''' )
if "model" in sd.keys():
SCREAMING_SNAKE_CASE__ : int =torch.load(UpperCamelCase__, map_location='''cpu''' )['''model''']
# pop unnecessary weights
SCREAMING_SNAKE_CASE__ : int =[
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
sd.pop(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple ={
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
SCREAMING_SNAKE_CASE__ : Optional[int] =sd.pop(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
SCREAMING_SNAKE_CASE__ : Any =sd[key]
            # We split QKV into separate Q, K, V
SCREAMING_SNAKE_CASE__ : List[Any] =key.replace('''.qkv_proj.''', '''.q_proj.''' )
SCREAMING_SNAKE_CASE__ : Optional[int] =key.replace('''.qkv_proj.''', '''.k_proj.''' )
SCREAMING_SNAKE_CASE__ : Optional[int] =key.replace('''.qkv_proj.''', '''.v_proj.''' )
SCREAMING_SNAKE_CASE__ : List[str] =value.shape[0]
assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the QKV weight separated in K, V, Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =torch.split(UpperCamelCase__, depth // 3, dim=0 )
SCREAMING_SNAKE_CASE__ : Any =q
SCREAMING_SNAKE_CASE__ : Any =k
SCREAMING_SNAKE_CASE__ : Optional[int] =v
del sd[key]
return sd
@torch.no_grad()
def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : Optional[int], UpperCamelCase__ : Union[str, Any]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict =load_checkpoint(UpperCamelCase__ )
if config is not None:
SCREAMING_SNAKE_CASE__ : Tuple =OPTConfig.from_pretrained(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE__ : str =OPTConfig()
SCREAMING_SNAKE_CASE__ : Dict =OPTModel(UpperCamelCase__ ).half().eval()
model.load_state_dict(UpperCamelCase__ )
# Check results
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
a_ = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
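# Editor's hedged usage sketch (script name and paths are illustrative):
# python convert_opt_checkpoint.py \
#     --fairseq_path /path/to/opt_metaseq_checkpoint/restored.pt \
#     --pytorch_dump_folder_path ./opt-hf \
#     --hf_config facebook/opt-350m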
| 665 |
'''simple docstring'''
def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =0
SCREAMING_SNAKE_CASE__ : Union[str, Any] =len(UpperCamelCase__ ) - 1
while left <= right:
        # avoid division by zero during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
SCREAMING_SNAKE_CASE__ : Union[str, Any] =left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase__ ):
return None
SCREAMING_SNAKE_CASE__ : Optional[int] =sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =left
SCREAMING_SNAKE_CASE__ : Optional[Any] =point
elif point > right:
SCREAMING_SNAKE_CASE__ : Optional[int] =right
SCREAMING_SNAKE_CASE__ : Tuple =point
else:
if item < current_item:
SCREAMING_SNAKE_CASE__ : str =point - 1
else:
SCREAMING_SNAKE_CASE__ : Tuple =point + 1
return None
def _a( UpperCamelCase__ : List[str], UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
SCREAMING_SNAKE_CASE__ : Dict =left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase__ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
elif point > right:
return interpolation_search_by_recursion(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, point - 1 )
else:
return interpolation_search_by_recursion(
UpperCamelCase__, UpperCamelCase__, point + 1, UpperCamelCase__ )
def _a( UpperCamelCase__ : Dict ):
'''simple docstring'''
if collection != sorted(UpperCamelCase__ ):
        raise ValueError('''Collection must be sorted in ascending order''' )
return True
if __name__ == "__main__":
import sys
a_ = 0
if debug == 1:
a_ = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
try:
__assert_sorted(collection)
except ValueError:
        sys.exit('Sequence must be sorted in ascending order to apply interpolation search')
a_ = 6_7
a_ = interpolation_search(collection, target)
if result is not None:
        print(F'''{target} found at position: {result}''')
else:
print('Not found')
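# Editor's worked probe for the demo above: searching 67 in
# [10, 30, 40, 45, 50, 66, 77, 93] first interpolates
# point = 0 + (67 - 10) * (7 - 0) // (93 - 10) = 399 // 83 = 4,
# and collection[4] = 50 < 67, so the search moves right; since 67 is absent,
# the loop terminates with None and 'Not found' is printed.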
| 665 | 1 |
'''simple docstring'''
# A bipartite graph is a graph whose vertices can be divided into two independent sets,
# U and V, such that every edge (u, v) connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. Equivalently, there is no edge that connects
# vertices of the same set.
def _a( UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict =[False] * len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any =[-1] * len(UpperCamelCase__ )
def dfs(UpperCamelCase__ : List[Any], UpperCamelCase__ : str ):
SCREAMING_SNAKE_CASE__ : Tuple =True
SCREAMING_SNAKE_CASE__ : Optional[Any] =c
for u in graph[v]:
if not visited[u]:
dfs(UpperCamelCase__, 1 - c )
for i in range(len(UpperCamelCase__ ) ):
if not visited[i]:
dfs(UpperCamelCase__, 0 )
for i in range(len(UpperCamelCase__ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
a_ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
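# Editor's note: the driver graph above is a 4-cycle (0-1-2-3) plus the isolated
# vertex 4, so a valid 2-coloring exists and True is printed. Any odd cycle breaks
# bipartiteness; e.g. the triangle {0: [1, 2], 1: [0, 2], 2: [0, 1]} forces
# color[i] == color[j] on some edge, so the check would return False.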
| 665 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def __magic_name__ ( *__lowercase : int , **__lowercase : Optional[Any] ) -> Optional[Any]:
pass
@is_pipeline_test
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@require_torch
def __magic_name__ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
SCREAMING_SNAKE_CASE__ : List[str] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] )
        # The floating-point scores are so close that we run into floating-point error, and the order is not
        # guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(__lowercase ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
SCREAMING_SNAKE_CASE__ : int =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
] , )
@require_tf
def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
SCREAMING_SNAKE_CASE__ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Tuple =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
] , )
@slow
@require_torch
def __magic_name__ ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE__ : Any =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : List[str] =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def __magic_name__ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ : str =pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE__ : Optional[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
def __init__( self : str , __lowercase : Tuple , __lowercase : Optional[Any]=3 , __lowercase : Union[str, Any]=32 , __lowercase : Optional[Any]=3 , __lowercase : Optional[int]=10 , __lowercase : List[str]=[10, 20, 30, 40] , __lowercase : Union[str, Any]=[1, 1, 2, 1] , __lowercase : Optional[int]=True , __lowercase : List[Any]=True , __lowercase : Optional[int]="relu" , __lowercase : Optional[Any]=3 , __lowercase : str=None , ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Any =parent
SCREAMING_SNAKE_CASE__ : Optional[Any] =batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_size
SCREAMING_SNAKE_CASE__ : Tuple =num_channels
SCREAMING_SNAKE_CASE__ : List[str] =embeddings_size
SCREAMING_SNAKE_CASE__ : List[Any] =hidden_sizes
SCREAMING_SNAKE_CASE__ : Any =depths
SCREAMING_SNAKE_CASE__ : Union[str, Any] =is_training
SCREAMING_SNAKE_CASE__ : Optional[int] =use_labels
SCREAMING_SNAKE_CASE__ : Dict =hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] =num_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] =scope
SCREAMING_SNAKE_CASE__ : Tuple =len(__lowercase )
def __magic_name__ ( self : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Tuple =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : int =None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : str =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Dict =self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : List[Any] ) -> Union[str, Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def __magic_name__ ( self : Dict , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : Tuple =TFRegNetModel(config=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =model(__lowercase , training=__lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __magic_name__ ( self : int , __lowercase : Any , __lowercase : Optional[Any] , __lowercase : Tuple ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.num_labels
SCREAMING_SNAKE_CASE__ : List[str] =TFRegNetForImageClassification(__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =model(__lowercase , labels=__lowercase , training=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : int ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =config_and_inputs
SCREAMING_SNAKE_CASE__ : Optional[Any] ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
snake_case_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
snake_case_ = (
{"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def __magic_name__ ( self : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Tuple =TFRegNetModelTester(self )
SCREAMING_SNAKE_CASE__ : Dict =ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
def __magic_name__ ( self : Optional[int] ) -> Dict:
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def __magic_name__ ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def __magic_name__ ( self : List[str] ) -> str:
super().test_keras_fit()
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def __magic_name__ ( self : int ) -> int:
pass
def __magic_name__ ( self : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Any =model_class(__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Optional[int] =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
def __magic_name__ ( self : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def __magic_name__ ( self : List[str] ) -> Any:
def check_hidden_states_output(__lowercase : List[str] , __lowercase : List[Any] , __lowercase : Dict ):
SCREAMING_SNAKE_CASE__ : List[str] =model_class(__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =model(**self._prepare_for_class(__lowercase , __lowercase ) , training=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.num_stages
self.assertEqual(len(__lowercase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[str] =['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
SCREAMING_SNAKE_CASE__ : List[Any] =layer_type
SCREAMING_SNAKE_CASE__ : Union[str, Any] =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def __magic_name__ ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(__lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : List[str]={} ):
SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase , return_dict=__lowercase , **__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase , return_dict=__lowercase , **__lowercase ).to_tuple()
def recursive_check(__lowercase : str , __lowercase : Dict ):
if isinstance(__lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__lowercase , __lowercase ):
recursive_check(__lowercase , __lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(__lowercase , __lowercase ) ) , msg=(
'''Tuple and dict output are not equal. Difference:'''
F" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"
) , )
recursive_check(__lowercase , __lowercase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str =model_class(__lowercase )
SCREAMING_SNAKE_CASE__ : int =self._prepare_for_class(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =self._prepare_for_class(__lowercase , __lowercase )
check_equivalence(__lowercase , __lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
SCREAMING_SNAKE_CASE__ : int =self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
check_equivalence(__lowercase , __lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =self._prepare_for_class(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =self._prepare_for_class(__lowercase , __lowercase )
check_equivalence(__lowercase , __lowercase , __lowercase , {'''output_hidden_states''': True} )
SCREAMING_SNAKE_CASE__ : Optional[Any] =self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
check_equivalence(__lowercase , __lowercase , __lowercase , {'''output_hidden_states''': True} )
def __magic_name__ ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
@slow
def __magic_name__ ( self : Dict ) -> Optional[int]:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Dict =TFRegNetModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : Any ) -> str:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : List[str] =TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
SCREAMING_SNAKE_CASE__ : Any =self.default_image_processor
SCREAMING_SNAKE_CASE__ : Optional[Any] =prepare_img()
SCREAMING_SNAKE_CASE__ : Dict =image_processor(images=__lowercase , return_tensors='''tf''' )
# forward pass
SCREAMING_SNAKE_CASE__ : List[Any] =model(**__lowercase , training=__lowercase )
# verify the logits
SCREAMING_SNAKE_CASE__ : Tuple =tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowercase )
SCREAMING_SNAKE_CASE__ : int =tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , __lowercase , atol=1e-4 )
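# The tuple/dict equivalence helper above compares outputs elementwise with
# tf.equal, so the two return styles must match exactly; the integration test
# on the real checkpoint, by contrast, only requires the logits to match the
# reference values within atol=1e-4.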
| 665 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case_ = JukeboxTokenizer
snake_case_ = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
def __magic_name__ ( self : Optional[int] ) -> str:
import torch
SCREAMING_SNAKE_CASE__ : List[str] =JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
SCREAMING_SNAKE_CASE__ : str =tokenizer(**self.metas )['''input_ids''']
# fmt: off
SCREAMING_SNAKE_CASE__ : str =[
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def __magic_name__ ( self : Any ) -> List[str]:
import torch
SCREAMING_SNAKE_CASE__ : int =JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer(**self.metas )['''input_ids''']
# fmt: off
SCREAMING_SNAKE_CASE__ : Optional[int] =[
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
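# The tokenizer emits one token sequence per Jukebox prior level; only the
# top-level prior is conditioned on the full lyrics, which is why tokens[1]
# and tokens[2] collapse to the short metadata prefix in both tests above.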
| 665 | 1 |
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def _a( UpperCamelCase__ : str, UpperCamelCase__ : List[str], UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any =AlbertConfig.from_json_file(UpperCamelCase__ )
print(f"Building PyTorch model from configuration: {config}" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =AlbertForPreTraining(UpperCamelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_albert(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
# Save pytorch-model
print(f"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict(), UpperCamelCase__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--albert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained ALBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
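# A minimal sketch of how this converter is invoked (script name and paths
# are illustrative placeholders, not fixed by the code above):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin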
| 665 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """gpt_neox"""
def __init__( self : List[Any] , __lowercase : Union[str, Any]=5_04_32 , __lowercase : int=61_44 , __lowercase : Tuple=44 , __lowercase : List[str]=64 , __lowercase : str=2_45_76 , __lowercase : Dict="gelu" , __lowercase : Tuple=0.25 , __lowercase : Tuple=1_00_00 , __lowercase : Tuple=0.0 , __lowercase : str=0.0 , __lowercase : List[Any]=0.1 , __lowercase : Dict=20_48 , __lowercase : Any=0.02 , __lowercase : Dict=1e-5 , __lowercase : List[Any]=True , __lowercase : str=0 , __lowercase : Optional[Any]=2 , __lowercase : Tuple=False , __lowercase : List[Any]=True , __lowercase : Optional[Any]=None , **__lowercase : Any , ) -> Dict:
super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =max_position_embeddings
SCREAMING_SNAKE_CASE__ : Any =hidden_size
SCREAMING_SNAKE_CASE__ : str =num_hidden_layers
SCREAMING_SNAKE_CASE__ : Any =num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] =intermediate_size
SCREAMING_SNAKE_CASE__ : Dict =hidden_act
SCREAMING_SNAKE_CASE__ : str =rotary_pct
SCREAMING_SNAKE_CASE__ : Optional[Any] =rotary_emb_base
SCREAMING_SNAKE_CASE__ : List[Any] =attention_dropout
SCREAMING_SNAKE_CASE__ : List[Any] =hidden_dropout
SCREAMING_SNAKE_CASE__ : str =classifier_dropout
SCREAMING_SNAKE_CASE__ : Any =initializer_range
SCREAMING_SNAKE_CASE__ : Dict =layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any =use_cache
SCREAMING_SNAKE_CASE__ : Tuple =tie_word_embeddings
SCREAMING_SNAKE_CASE__ : Tuple =use_parallel_residual
SCREAMING_SNAKE_CASE__ : Union[str, Any] =rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
'''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
def __magic_name__ ( self : Optional[Any] ) -> Optional[Any]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __lowercase ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
F"got {self.rope_scaling}" )
SCREAMING_SNAKE_CASE__ : int =self.rope_scaling.get('''type''' , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =self.rope_scaling.get('''factor''' , __lowercase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(__lowercase , __lowercase ) or rope_scaling_factor <= 1.0:
raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : Dict, UpperCamelCase__ : str=None, UpperCamelCase__ : List[Any]=None ):
'''simple docstring'''
if attention_mask is None:
SCREAMING_SNAKE_CASE__ : List[Any] =tf.cast(tf.math.not_equal(UpperCamelCase__, config.pad_token_id ), tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __SCREAMING_SNAKE_CASE :
snake_case_ = OPTConfig
snake_case_ = {}
snake_case_ = """gelu"""
def __init__( self : Optional[Any] , __lowercase : str , __lowercase : Tuple=13 , __lowercase : Optional[Any]=7 , __lowercase : List[str]=True , __lowercase : Any=False , __lowercase : Union[str, Any]=99 , __lowercase : str=16 , __lowercase : Union[str, Any]=2 , __lowercase : Tuple=4 , __lowercase : Dict=4 , __lowercase : Dict="gelu" , __lowercase : Tuple=0.1 , __lowercase : Tuple=0.1 , __lowercase : Any=20 , __lowercase : Any=2 , __lowercase : List[Any]=1 , __lowercase : Any=0 , __lowercase : Any=16 , __lowercase : Dict=16 , ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : str =parent
SCREAMING_SNAKE_CASE__ : str =batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] =is_training
SCREAMING_SNAKE_CASE__ : str =use_labels
SCREAMING_SNAKE_CASE__ : Any =vocab_size
SCREAMING_SNAKE_CASE__ : str =hidden_size
SCREAMING_SNAKE_CASE__ : str =num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] =num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[Any] =intermediate_size
SCREAMING_SNAKE_CASE__ : int =hidden_act
SCREAMING_SNAKE_CASE__ : Dict =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Any =max_position_embeddings
SCREAMING_SNAKE_CASE__ : Tuple =eos_token_id
SCREAMING_SNAKE_CASE__ : Tuple =pad_token_id
SCREAMING_SNAKE_CASE__ : Optional[int] =bos_token_id
SCREAMING_SNAKE_CASE__ : Union[str, Any] =embed_dim
SCREAMING_SNAKE_CASE__ : Dict =word_embed_proj_dim
SCREAMING_SNAKE_CASE__ : Union[str, Any] =False
def __magic_name__ ( self : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : int =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE__ : Any =tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE__ : List[Any] =self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=__lowercase , **self.config_updates , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =prepare_opt_inputs_dict(__lowercase , __lowercase )
return config, inputs_dict
def __magic_name__ ( self : str , __lowercase : Optional[int] , __lowercase : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] =TFOPTModel(config=__lowercase )
SCREAMING_SNAKE_CASE__ : int =inputs_dict['''input_ids''']
SCREAMING_SNAKE_CASE__ : Optional[Any] =input_ids[:1, :]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =inputs_dict['''attention_mask'''][:1, :]
SCREAMING_SNAKE_CASE__ : Optional[int] =1
# first forward pass
SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase , attention_mask=__lowercase , use_cache=__lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ : Any =ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__ : List[Any] =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention_mask
SCREAMING_SNAKE_CASE__ : List[str] =tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE__ : Optional[int] =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE__ : str =model(__lowercase , attention_mask=__lowercase )[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase , attention_mask=__lowercase , past_key_values=__lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE__ : List[Any] =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE__ : str =output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE__ : int =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__lowercase , __lowercase , rtol=1e-3 )
@require_tf
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
snake_case_ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
snake_case_ = (TFOPTForCausalLM,) if is_tf_available() else ()
snake_case_ = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = 10
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =TFOPTModelTester(self )
SCREAMING_SNAKE_CASE__ : Any =ConfigTester(self , config_class=__lowercase )
def __magic_name__ ( self : str ) -> Tuple:
self.config_tester.run_common_tests()
def __magic_name__ ( self : Union[str, Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(__lowercase : List[str] , __lowercase : Union[str, Any] ):
if hasattr(__lowercase , '''weight''' ):
return embedding_layer.weight
else:
# Here we build the word embedding weights if they do not exist.
# Then we retry fetching the attribute once the model is built.
model.build()
if hasattr(__lowercase , '''weight''' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
SCREAMING_SNAKE_CASE__ : str =model_class(config=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =_get_word_embedding_weight(__lowercase , model.get_input_embeddings() )
SCREAMING_SNAKE_CASE__ : int =_get_word_embedding_weight(__lowercase , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =_get_word_embedding_weight(__lowercase , model.get_input_embeddings() )
SCREAMING_SNAKE_CASE__ : int =_get_word_embedding_weight(__lowercase , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
SCREAMING_SNAKE_CASE__ : List[Any] =size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , __lowercase )
# check that weights remain the same after resizing
SCREAMING_SNAKE_CASE__ : Union[str, Any] =True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
SCREAMING_SNAKE_CASE__ : List[str] =False
self.assertTrue(__lowercase )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , __lowercase )
SCREAMING_SNAKE_CASE__ : Any =True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
SCREAMING_SNAKE_CASE__ : Any =False
self.assertTrue(__lowercase )
def _a( UpperCamelCase__ : Dict ):
'''simple docstring'''
return tf.constant(UpperCamelCase__, dtype=tf.intaa )
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case_ = 99
def __magic_name__ ( self : Dict ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : str =tf.ones((4, 1) , dtype=tf.intaa ) * 2
SCREAMING_SNAKE_CASE__ : Any =tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
SCREAMING_SNAKE_CASE__ : List[str] =input_ids.shape[0]
SCREAMING_SNAKE_CASE__ : Dict =OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def __magic_name__ ( self : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE__ : Tuple =TFOPTModel.from_pretrained('''facebook/opt-350m''' )
SCREAMING_SNAKE_CASE__ : Any =_long_tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
SCREAMING_SNAKE_CASE__ : Tuple =tf.not_equal(__lowercase , model.config.pad_token_id )
with tf.GradientTape():
SCREAMING_SNAKE_CASE__ : List[str] =model(input_ids=__lowercase , attention_mask=__lowercase ).last_hidden_state
SCREAMING_SNAKE_CASE__ : Optional[int] =(1, 11, 5_12)
self.assertEqual(output.shape , __lowercase )
SCREAMING_SNAKE_CASE__ : Any =tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , __lowercase , atol=4e-3 ) )
SCREAMING_SNAKE_CASE__ : str =tf.function(__lowercase , jit_compile=__lowercase )
SCREAMING_SNAKE_CASE__ : Any =xla_generate(__lowercase , __lowercase )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , __lowercase , atol=4e-2 ) )
@require_tf
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : Union[str, Any] ) -> List[str]:
super().setUp()
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''facebook/opt-350m'''
def __magic_name__ ( self : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =TFOPTForCausalLM.from_pretrained(self.path_model )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =GPTaTokenizer.from_pretrained(self.path_model )
SCREAMING_SNAKE_CASE__ : Dict =[
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer(__lowercase , return_tensors='''tf''' , padding=__lowercase , add_special_tokens=__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
SCREAMING_SNAKE_CASE__ : Tuple =tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
] )
self.assertTrue(np.allclose(__lowercase , __lowercase , atol=1e-4 ) )
SCREAMING_SNAKE_CASE__ : Tuple =tf.function(__lowercase , jit_compile=__lowercase )
SCREAMING_SNAKE_CASE__ : int =tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(__lowercase , __lowercase , atol=1e-4 ) )
@require_tf
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@property
def __magic_name__ ( self : List[str] ) -> Optional[int]:
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : int ='''facebook/opt-125m'''
SCREAMING_SNAKE_CASE__ : Any =[
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
SCREAMING_SNAKE_CASE__ : List[str] =[]
SCREAMING_SNAKE_CASE__ : Tuple =GPTaTokenizer.from_pretrained(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =TFOPTForCausalLM.from_pretrained(__lowercase )
for prompt in self.prompts:
SCREAMING_SNAKE_CASE__ : Dict =tokenizer(__lowercase , return_tensors='''tf''' ).input_ids
SCREAMING_SNAKE_CASE__ : Dict =model.generate(__lowercase , max_length=10 )
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.batch_decode(__lowercase , skip_special_tokens=__lowercase )
predicted_outputs += generated_string
self.assertListEqual(__lowercase , __lowercase )
def __magic_name__ ( self : Dict ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Any ='''facebook/opt-350m'''
SCREAMING_SNAKE_CASE__ : List[Any] =GPTaTokenizer.from_pretrained(__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TFOPTForCausalLM.from_pretrained(__lowercase )
SCREAMING_SNAKE_CASE__ : Any ='''left'''
# use different length sentences to test batching
SCREAMING_SNAKE_CASE__ : Optional[int] =[
'''Hello, my dog is a little''',
'''Today, I''',
]
SCREAMING_SNAKE_CASE__ : int =tokenizer(__lowercase , return_tensors='''tf''' , padding=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =inputs['''input_ids''']
SCREAMING_SNAKE_CASE__ : Dict =model.generate(input_ids=__lowercase , attention_mask=inputs['''attention_mask'''] )
SCREAMING_SNAKE_CASE__ : Any =tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
SCREAMING_SNAKE_CASE__ : List[str] =model.generate(input_ids=__lowercase )
SCREAMING_SNAKE_CASE__ : int =inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] , tf.intaa ) )
SCREAMING_SNAKE_CASE__ : int =tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
SCREAMING_SNAKE_CASE__ : Any =model.generate(input_ids=__lowercase , max_length=model.config.max_length - num_paddings )
SCREAMING_SNAKE_CASE__ : Any =tokenizer.batch_decode(__lowercase , skip_special_tokens=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =tokenizer.decode(output_non_padded[0] , skip_special_tokens=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.decode(output_padded[0] , skip_special_tokens=__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =[
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , [non_padded_sentence, padded_sentence] )
def __magic_name__ ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[str] ='''facebook/opt-350m'''
SCREAMING_SNAKE_CASE__ : List[str] =[
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
SCREAMING_SNAKE_CASE__ : int =[]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =GPTaTokenizer.from_pretrained(__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =TFOPTForCausalLM.from_pretrained(__lowercase )
for prompt in self.prompts:
SCREAMING_SNAKE_CASE__ : str =tokenizer(__lowercase , return_tensors='''tf''' ).input_ids
SCREAMING_SNAKE_CASE__ : int =model.generate(__lowercase , max_length=10 )
SCREAMING_SNAKE_CASE__ : int =tokenizer.batch_decode(__lowercase , skip_special_tokens=__lowercase )
predicted_outputs += generated_string
self.assertListEqual(__lowercase , __lowercase )
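# The XLA checks above wrap the forward pass in tf.function(jit_compile=True)
# and compare it against eager execution; note the compiled path is allowed a
# looser tolerance (atol=4e-2) than the eager comparison (atol=4e-3) in the
# last-hidden-state test.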
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
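# With this layout, importing the package only registers the names listed in
# _import_structure; _LazyModule defers the heavy torch/TF imports until the
# corresponding attribute is first accessed, while the TYPE_CHECKING branch
# gives static type checkers the real import paths at no runtime cost.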
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
from random import random
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , __lowercase : int | None = None ) -> int:
SCREAMING_SNAKE_CASE__ : Any =value
SCREAMING_SNAKE_CASE__ : str =random()
SCREAMING_SNAKE_CASE__ : Node | None =None
SCREAMING_SNAKE_CASE__ : Node | None =None
def __repr__( self : List[Any] ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return F"'{self.value}: {self.prior:.5}'"
else:
return pformat(
{F"{self.value}: {self.prior:.5}": (self.left, self.right)} , indent=1 )
def __str__( self : Tuple ) -> str:
SCREAMING_SNAKE_CASE__ : int =str(self.value ) + ''' '''
SCREAMING_SNAKE_CASE__ : str =str(self.left or '''''' )
SCREAMING_SNAKE_CASE__ : Tuple =str(self.right or '''''' )
return value + left + right
def _a( UpperCamelCase__ : Node | None, UpperCamelCase__ : int ):
'''simple docstring'''
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =split(root.left, UpperCamelCase__ )
return left, root
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =split(root.right, UpperCamelCase__ )
return root, right
def _a( UpperCamelCase__ : Node | None, UpperCamelCase__ : Node | None ):
'''simple docstring'''
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
SCREAMING_SNAKE_CASE__ : List[Any] =merge(left.right, UpperCamelCase__ )
return left
else:
SCREAMING_SNAKE_CASE__ : int =merge(UpperCamelCase__, right.left )
return right
def _a( UpperCamelCase__ : Node | None, UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict =Node(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =split(UpperCamelCase__, UpperCamelCase__ )
return merge(merge(UpperCamelCase__, UpperCamelCase__ ), UpperCamelCase__ )
def _a( UpperCamelCase__ : Node | None, UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =split(UpperCamelCase__, value - 1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] =split(UpperCamelCase__, UpperCamelCase__ )
return merge(UpperCamelCase__, UpperCamelCase__ )
def _a( UpperCamelCase__ : Node | None ):
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value, end=''',''' )
inorder(root.right )
def _a( UpperCamelCase__ : Node | None, UpperCamelCase__ : str ):
'''simple docstring'''
for arg in args.split():
if arg[0] == "+":
SCREAMING_SNAKE_CASE__ : List[str] =insert(UpperCamelCase__, int(arg[1:] ) )
elif arg[0] == "-":
SCREAMING_SNAKE_CASE__ : List[Any] =erase(UpperCamelCase__, int(arg[1:] ) )
else:
print('''Unknown command''' )
return root
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =None
print(
'''enter numbers to create a tree, + value to insert value into the treap, '''
'''- value to erase all nodes with that value. \'q\' to quit. ''' )
SCREAMING_SNAKE_CASE__ : List[str] =input()
while args != "q":
SCREAMING_SNAKE_CASE__ : Any =interact_treap(UpperCamelCase__, UpperCamelCase__ )
print(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Dict =input()
print('''good bye!''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
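# An illustrative session (the tree shape depends on the random priorities,
# but the sorted inorder view is deterministic): entering "+1 +3 +5 +7" and
# then "-3" leaves a treap whose inorder traversal prints "1,5,7,".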
| 665 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _a( UpperCamelCase__ : NDArray[floataa], UpperCamelCase__ : NDArray[floataa], UpperCamelCase__ : list[int], UpperCamelCase__ : int, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =coefficient_matrix.shape
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =constant_matrix.shape
if rowsa != colsa:
SCREAMING_SNAKE_CASE__ : Any =f"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"
raise ValueError(UpperCamelCase__ )
if colsa != 1:
SCREAMING_SNAKE_CASE__ : str =f"Constant matrix must be nx1 but received {rowsa}x{colsa}"
raise ValueError(UpperCamelCase__ )
if rowsa != rowsa:
SCREAMING_SNAKE_CASE__ : str =(
'''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
f"received {rowsa}x{colsa} and {rowsa}x{colsa}"
)
raise ValueError(UpperCamelCase__ )
if len(UpperCamelCase__ ) != rowsa:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(
'''Number of initial values must be equal to number of rows in coefficient '''
f"matrix but received {len(UpperCamelCase__ )} and {rowsa}"
)
raise ValueError(UpperCamelCase__ )
if iterations <= 0:
raise ValueError('''Iterations must be at least 1''' )
SCREAMING_SNAKE_CASE__ : NDArray[floataa] =np.concatenate(
(coefficient_matrix, constant_matrix), axis=1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =table.shape
strictly_diagonally_dominant(UpperCamelCase__ )
# Iterates the whole matrix for given number of times
for _ in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : List[str] =[]
for row in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =0
for col in range(UpperCamelCase__ ):
if col == row:
SCREAMING_SNAKE_CASE__ : int =table[row][col]
elif col == cols - 1:
SCREAMING_SNAKE_CASE__ : Any =table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
SCREAMING_SNAKE_CASE__ : int =(temp + val) / denom
new_val.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =new_val
return [float(UpperCamelCase__ ) for i in new_val]
def _a( UpperCamelCase__ : NDArray[floataa] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =table.shape
SCREAMING_SNAKE_CASE__ : Any =True
for i in range(0, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : int =0
for j in range(0, cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
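# A worked example for the solver above (the first function is the Jacobi
# iteration; the matrix is strictly diagonally dominant, so it converges):
#   coefficient: [[4, 1, 1], [1, 5, 2], [1, 2, 4]]
#   constant:    [[2], [-6], [-4]],  init_val: [0.5, -0.5, -0.5]
#   exact solution it approaches: x = 56/59 ≈ 0.949, y = -66/59 ≈ -1.119,
#   z = -40/59 ≈ -0.678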
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
a_ = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['DPTFeatureExtractor']
a_ = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _a( UpperCamelCase__ : str, UpperCamelCase__ : Tuple, UpperCamelCase__ : Any, UpperCamelCase__ : List[str], UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
with open(UpperCamelCase__ ) as metadata_file:
SCREAMING_SNAKE_CASE__ : Optional[int] =json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =LukeConfig(use_entity_aware_attention=UpperCamelCase__, **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.load(UpperCamelCase__, map_location='''cpu''' )['''module''']
# Load the entity vocab file
SCREAMING_SNAKE_CASE__ : List[str] =load_original_entity_vocab(UpperCamelCase__ )
# add an entry for [MASK2]
SCREAMING_SNAKE_CASE__ : Optional[int] =max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
SCREAMING_SNAKE_CASE__ : Optional[int] =XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE__ : List[Any] =AddedToken('''<ent>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any =AddedToken('''<ent2>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''r''' ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] =json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : int ='''MLukeTokenizer'''
with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''w''' ) as f:
json.dump(UpperCamelCase__, UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__, MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ), '''w''' ) as f:
json.dump(UpperCamelCase__, UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =MLukeTokenizer.from_pretrained(UpperCamelCase__ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE__ : str =tokenizer.convert_tokens_to_ids(['''@'''] )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.convert_tokens_to_ids(['''#'''] )[0]
SCREAMING_SNAKE_CASE__ : Dict =state_dict['''embeddings.word_embeddings.weight''']
SCREAMING_SNAKE_CASE__ : List[str] =word_emb[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Tuple =word_emb[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : str =torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[bias_name]
SCREAMING_SNAKE_CASE__ : List[Any] =decoder_bias[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : str =decoder_bias[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : List[str] =torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE__ : Tuple =f"encoder.layer.{layer_index}.attention.self."
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : List[Any] =state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE__ : Any =state_dict['''entity_embeddings.entity_embeddings.weight''']
SCREAMING_SNAKE_CASE__ : Any =entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict['''entity_predictions.bias''']
SCREAMING_SNAKE_CASE__ : Tuple =entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_prediction_bias, entity_mask_bias] )
SCREAMING_SNAKE_CASE__ : int =LukeForMaskedLM(config=UpperCamelCase__ ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
SCREAMING_SNAKE_CASE__ : Tuple =OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[key]
else:
SCREAMING_SNAKE_CASE__ : Any =state_dict[key]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =model.load_state_dict(UpperCamelCase__, strict=UpperCamelCase__ )
if set(UpperCamelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" )
if set(UpperCamelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
SCREAMING_SNAKE_CASE__ : Any =MLukeTokenizer.from_pretrained(UpperCamelCase__, task='''entity_classification''' )
SCREAMING_SNAKE_CASE__ : str ='''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(0, 9)
SCREAMING_SNAKE_CASE__ : str =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : List[Any] =model(**UpperCamelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE__ : str =torch.Size((1, 3_3, 7_6_8) )
SCREAMING_SNAKE_CASE__ : int =torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE__ : Any =torch.Size((1, 1, 7_6_8) )
SCREAMING_SNAKE_CASE__ : int =torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
SCREAMING_SNAKE_CASE__ : str =MLukeTokenizer.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''Tokyo is the capital of <mask>.'''
SCREAMING_SNAKE_CASE__ : Dict =(2_4, 3_0)
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : List[str] =model(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] =encoding['''input_ids'''][0].tolist()
SCREAMING_SNAKE_CASE__ : Any =input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.entity_logits[0][0].argmax().item()
SCREAMING_SNAKE_CASE__ : Dict =[
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) )
model.save_pretrained(UpperCamelCase__ )
def _a( UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] =['''[MASK]''', '''[PAD]''', '''[UNK]''']
SCREAMING_SNAKE_CASE__ : List[str] =[json.loads(UpperCamelCase__ ) for line in open(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Optional[int] ={}
for entry in data:
SCREAMING_SNAKE_CASE__ : Tuple =entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
SCREAMING_SNAKE_CASE__ : str =entity_id
break
SCREAMING_SNAKE_CASE__ : Union[str, Any] =f"{language}:{entity_name}"
SCREAMING_SNAKE_CASE__ : Union[str, Any] =entity_id
return new_mapping
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
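# A minimal sketch of an invocation (script name and paths are illustrative
# placeholders; only --model_size base is verified above, large raises
# NotImplementedError):
#   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./mluke/pytorch_model.bin \
#       --metadata_path ./mluke/metadata.json \
#       --entity_vocab_path ./mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base-hf \
#       --model_size base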
| 665 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """vit_msn"""
def __init__( self : Optional[Any] , __lowercase : Tuple=7_68 , __lowercase : Tuple=12 , __lowercase : List[Any]=12 , __lowercase : List[str]=30_72 , __lowercase : int="gelu" , __lowercase : List[Any]=0.0 , __lowercase : Union[str, Any]=0.0 , __lowercase : List[Any]=0.02 , __lowercase : List[str]=1e-06 , __lowercase : Union[str, Any]=2_24 , __lowercase : Optional[int]=16 , __lowercase : Union[str, Any]=3 , __lowercase : List[Any]=True , **__lowercase : Union[str, Any] , ) -> str:
super().__init__(**__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =hidden_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] =num_attention_heads
SCREAMING_SNAKE_CASE__ : str =intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_act
SCREAMING_SNAKE_CASE__ : List[str] =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : int =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] =initializer_range
SCREAMING_SNAKE_CASE__ : Optional[int] =layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[Any] =image_size
SCREAMING_SNAKE_CASE__ : Tuple =patch_size
SCREAMING_SNAKE_CASE__ : List[Any] =num_channels
SCREAMING_SNAKE_CASE__ : List[str] =qkv_bias
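# The defaults above describe a ViT-MSN base encoder: 768-d hidden states,
# 12 layers with 12 attention heads each, 3072-d MLPs, and 224x224 inputs
# split into 16x16 patches (196 patch tokens per image).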
| 665 |
'''simple docstring'''
def _a( UpperCamelCase__ : str, UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple =[[False for _ in range(m + 1 )] for _ in range(n + 1 )]
SCREAMING_SNAKE_CASE__ : List[Any] =True
for i in range(UpperCamelCase__ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
SCREAMING_SNAKE_CASE__ : Optional[int] =True
if a[i].islower():
SCREAMING_SNAKE_CASE__ : List[Any] =True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
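# dp[i][j] above is True when the first i characters of `a` can be turned into
# the first j characters of `b` by uppercasing some of a's lowercase letters
# and deleting the remaining lowercase ones. Worked example (illustrative):
#   "daBcd" -> "ABC" is True  (drop 'd', uppercase 'a', keep 'B', uppercase
#   'c', drop the trailing 'd'); "dBcd" -> "ABC" is False, since the uppercase
#   'B' cannot match the required 'A' and cannot be deleted.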
| 665 | 1 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _a( UpperCamelCase__ : List[Any], UpperCamelCase__ : str, UpperCamelCase__ : str, UpperCamelCase__ : List[str], UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TapasConfig.from_json_file(UpperCamelCase__ )
# set absolute/relative position embeddings parameter
SCREAMING_SNAKE_CASE__ : List[Any] =reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
SCREAMING_SNAKE_CASE__ : str =TapasForQuestionAnswering(config=UpperCamelCase__ )
elif task == "WTQ":
# run_task_main.py hparams
SCREAMING_SNAKE_CASE__ : List[str] =4
SCREAMING_SNAKE_CASE__ : Tuple =True
# hparam_utils.py hparams
SCREAMING_SNAKE_CASE__ : Dict =0.6_6_4_6_9_4
SCREAMING_SNAKE_CASE__ : List[Any] =0.2_0_7_9_5_1
SCREAMING_SNAKE_CASE__ : Optional[int] =0.1_2_1_1_9_4
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
SCREAMING_SNAKE_CASE__ : List[str] =True
SCREAMING_SNAKE_CASE__ : int =False
SCREAMING_SNAKE_CASE__ : Dict =0.0_3_5_2_5_1_3
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TapasForQuestionAnswering(config=UpperCamelCase__ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
SCREAMING_SNAKE_CASE__ : Any =4
SCREAMING_SNAKE_CASE__ : str =False
# hparam_utils.py hparams
SCREAMING_SNAKE_CASE__ : List[str] =3_6.4_5_1_9
SCREAMING_SNAKE_CASE__ : Optional[int] =0.9_0_3_4_2_1
SCREAMING_SNAKE_CASE__ : int =2_2_2.0_8_8
SCREAMING_SNAKE_CASE__ : Union[str, Any] =True
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
SCREAMING_SNAKE_CASE__ : Dict =0.7_6_3_1_4_1
SCREAMING_SNAKE_CASE__ : str =TapasForQuestionAnswering(config=UpperCamelCase__ )
elif task == "TABFACT":
SCREAMING_SNAKE_CASE__ : List[Any] =TapasForSequenceClassification(config=UpperCamelCase__ )
elif task == "MLM":
SCREAMING_SNAKE_CASE__ : Optional[int] =TapasForMaskedLM(config=UpperCamelCase__ )
elif task == "INTERMEDIATE_PRETRAINING":
SCREAMING_SNAKE_CASE__ : Optional[int] =TapasModel(config=UpperCamelCase__ )
else:
raise ValueError(f"Task {task} not supported." )
print(f"Building PyTorch model from configuration: {config}" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
# Save the PyTorch model (weights and configuration)
print(f"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(UpperCamelCase__ )
# Save tokenizer files
print(f"Save tokenizer files to {pytorch_dump_path}" )
SCREAMING_SNAKE_CASE__ : int =TapasTokenizer(vocab_file=tf_checkpoint_path[:-1_0] + '''vocab.txt''', model_max_length=5_1_2 )
tokenizer.save_pretrained(UpperCamelCase__ )
print('''Used relative position embeddings:''', model.config.reset_position_index_per_cell )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
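    # Example invocation (illustrative paths and script name, added note):
    # python convert_tapas_original_tf_checkpoint_to_pytorch.py --task WTQ \
    #     --reset_position_index_per_cell \
    #     --tf_checkpoint_path /path/to/model.ckpt \
    #     --tapas_config_file /path/to/tapas_config.json \
    #     --pytorch_dump_path /path/to/output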
| 665 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            labels=token_labels, next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_megatron_bert_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_megatron_bert_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_megatron_bert_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_megatron_bert_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_megatron_bert_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_megatron_bert_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_megatron_bert_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
@slow
@unittest.skip('''Model is not available.''' )
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 665 | 1 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
a_ = True
except (ImportError, ModuleNotFoundError):
a_ = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Put each sentence of ``x`` on its own line (used for rougeLsum scoring)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
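# Illustrative call (added): splits sentences onto separate lines, e.g.
#   add_newline_to_end_of_each_sentence("Hello there. <n>How are you?")
#   -> "Hello there.\nHow are you?"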
| 665 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 665 | 1 |
'''simple docstring'''
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """
    Minimum cost to travel on the given days of the year using 1-, 7- and
    30-day passes priced by ``costs``.
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
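    # Demo (added): for travel days [1, 4, 6, 7, 8, 20] and pass costs
    # [2, 7, 15], the optimum is 11: a 7-day pass covering days 1-7, then
    # single-day passes on days 8 and 20.
    print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))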
| 665 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size(self):
return 32
@property
    def time_input_dim(self):
return 32
@property
    def time_embed_dim(self):
return self.time_input_dim * 4
@property
    def renderer_dim(self):
return 8
@property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=64,
            projection_dim=self.text_embedder_hidden_size, intermediate_size=37,
            num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model
@property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224,
        )
        return image_processor
@property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
        model = PriorTransformer(**model_kwargs)
return model
@property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs)
return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample",
            use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0,
        )
        components = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference,
        )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image, generator=generator, guidance_scale=3.0,
            num_inference_steps=64, frame_size=64, output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 665 | 1 |
'''simple docstring'''
def topological_sort(graph):
    """Kahn's algorithm: repeatedly remove vertices with indegree 0."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
a_ = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
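# Expected output for the adjacency list above (added note): [0, 1, 2, 3, 4, 5],
# one valid topological ordering of this DAG.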
| 665 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None,
        activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1,
        layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True,
        bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
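# Usage sketch (added, not part of the original file):
#   config = GPTBigCodeConfig()  # defaults mirror bigcode/gpt_bigcode-santacoder
#   assert config.hidden_size == config.n_embd == 768  # resolved via attribute_map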
| 665 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim, depths=depths, focal_levels=focal_levels, focal_windows=focal_windows,
        use_conv_embed=use_conv_embed, id2label=id2label, label2id=label2id,
        use_post_layernorm=use_post_layernorm, use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()
    # load state dict
    model.load_state_dict(state_dict)
    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True, size={"shortest_edge": 256}, resample=PILImageResampling.BILINEAR,
        do_center_crop=True, crop_size=224, do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)
    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print(f"Pushing model and processor of {model_name} to the hub..." )
model.push_to_hub(f"{model_name}" )
processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
a_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
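    # Example invocation (illustrative script name and paths, added note):
    # python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
    #     --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub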
| 665 |
'''simple docstring'''
class MaxFenwickTree:
    """Fenwick-style tree supporting point updates and range maximum queries."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # NOTE: reconstructed from context; recompute the maximum over
                # the segment [current_left_border, index] that tree[index] covers.
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Maximum over the half-open range [left, right)."""
        right -= 1  # because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
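    # Demo (added; assumes the reconstructed update() above is faithful):
    tree = MaxFenwickTree(5)
    tree.update(2, 10)
    tree.update(4, 3)
    print(tree.query(0, 5))  # 10: maximum over the half-open range [0, 5)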
| 665 | 1 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def _a( UpperCamelCase__ : List[Any], UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =tmp_path / '''cache'''
SCREAMING_SNAKE_CASE__ : Dict ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
SCREAMING_SNAKE_CASE__ : Dict =features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE__ : str =(
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE__ : Any =ParquetDatasetReader(UpperCamelCase__, features=UpperCamelCase__, cache_dir=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__, UpperCamelCase__ )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _a( UpperCamelCase__ : str, UpperCamelCase__ : List[str], UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =tmp_path / '''cache'''
SCREAMING_SNAKE_CASE__ : Any ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
SCREAMING_SNAKE_CASE__ : int =ParquetDatasetReader(UpperCamelCase__, cache_dir=UpperCamelCase__, split=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__, UpperCamelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''', [str, list] )
def _a( UpperCamelCase__ : Any, UpperCamelCase__ : Dict, UpperCamelCase__ : List[Any] ):
'''simple docstring'''
if issubclass(UpperCamelCase__, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] =parquet_path
elif issubclass(UpperCamelCase__, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Tuple =[parquet_path]
SCREAMING_SNAKE_CASE__ : Optional[int] =tmp_path / '''cache'''
SCREAMING_SNAKE_CASE__ : Optional[Any] ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
SCREAMING_SNAKE_CASE__ : Optional[int] =ParquetDatasetReader(UpperCamelCase__, cache_dir=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__, UpperCamelCase__ )
def _a( UpperCamelCase__ : Tuple, UpperCamelCase__ : Any, UpperCamelCase__ : List[str]=("train",) ):
'''simple docstring'''
assert isinstance(UpperCamelCase__, UpperCamelCase__ )
for split in splits:
SCREAMING_SNAKE_CASE__ : Tuple =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def _a( UpperCamelCase__ : Optional[Any], UpperCamelCase__ : str, UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple =tmp_path / '''cache'''
SCREAMING_SNAKE_CASE__ : int ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE__ : List[Any] =ParquetDatasetReader(
{'''train''': parquet_path}, cache_dir=UpperCamelCase__, keep_in_memory=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__, UpperCamelCase__ )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def _a( UpperCamelCase__ : List[Any], UpperCamelCase__ : int, UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple =tmp_path / '''cache'''
SCREAMING_SNAKE_CASE__ : List[str] ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
SCREAMING_SNAKE_CASE__ : Dict =features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE__ : List[Any] =(
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE__ : List[Any] =ParquetDatasetReader({'''train''': parquet_path}, features=UpperCamelCase__, cache_dir=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__, UpperCamelCase__ )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _a( UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Any, UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
if split:
SCREAMING_SNAKE_CASE__ : Optional[Any] ={split: parquet_path}
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] ='''train'''
SCREAMING_SNAKE_CASE__ : str ={'''train''': parquet_path, '''test''': parquet_path}
SCREAMING_SNAKE_CASE__ : Any =tmp_path / '''cache'''
SCREAMING_SNAKE_CASE__ : Optional[int] ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
SCREAMING_SNAKE_CASE__ : Optional[Any] =ParquetDatasetReader(UpperCamelCase__, cache_dir=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__, UpperCamelCase__, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _a( UpperCamelCase__ : Dict, UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] =ParquetDatasetWriter(UpperCamelCase__, tmp_path / '''foo.parquet''' )
assert writer.write() > 0
SCREAMING_SNAKE_CASE__ : List[Any] =pq.ParquetFile(tmp_path / '''foo.parquet''' )
SCREAMING_SNAKE_CASE__ : Any =pf.read()
assert dataset.data.table == output_table
def _a( UpperCamelCase__ : Tuple, UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str =str(shared_datadir / '''test_image_rgb.jpg''' )
SCREAMING_SNAKE_CASE__ : List[Any] ={'''image''': [image_path]}
SCREAMING_SNAKE_CASE__ : Optional[Any] =Features({'''image''': Image()} )
SCREAMING_SNAKE_CASE__ : List[Any] =Dataset.from_dict(UpperCamelCase__, features=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] =ParquetDatasetWriter(UpperCamelCase__, tmp_path / '''foo.parquet''' )
assert writer.write() > 0
SCREAMING_SNAKE_CASE__ : List[Any] =Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
SCREAMING_SNAKE_CASE__ : Union[str, Any] =ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ), streaming=UpperCamelCase__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''', [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
], )
def _a( UpperCamelCase__ : List[Any], UpperCamelCase__ : Tuple ):
'''simple docstring'''
assert get_writer_batch_size(UpperCamelCase__ ) == expected
| 665 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(
        self, vqvae: AutoencoderKL, unet: UNet2DConditionModel, mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
def __call__( self : Dict , __lowercase : int = 1 , __lowercase : str = None , __lowercase : np.ndarray = None , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = None , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : float = 0 , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : torch.Tensor = None , __lowercase : torch.Tensor = None , __lowercase : Dict=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =steps or self.get_default_steps()
self.scheduler.set_timesteps(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
SCREAMING_SNAKE_CASE__ : Optional[int] =randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__lowercase , device=self.device , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =noise
SCREAMING_SNAKE_CASE__ : List[str] =None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Dict =self.mel.audio_slice_to_image(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
SCREAMING_SNAKE_CASE__ : int =(input_image / 2_55) * 2 - 1
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.vqvae.encode(torch.unsqueeze(__lowercase , 0 ) ).latent_dist.sample(
generator=__lowercase )[0]
SCREAMING_SNAKE_CASE__ : int =self.vqvae.config.scaling_factor * input_images
if start_step > 0:
SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , self.scheduler.timesteps[start_step - 1] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
SCREAMING_SNAKE_CASE__ : Optional[Any] =int(mask_start_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : Tuple =int(mask_end_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __lowercase ):
SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase , __lowercase )['''sample''']
else:
SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase )['''sample''']
if isinstance(self.scheduler , __lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.scheduler.step(
model_output=__lowercase , timestep=__lowercase , sample=__lowercase , eta=__lowercase , generator=__lowercase , )['''prev_sample''']
else:
SCREAMING_SNAKE_CASE__ : Any =self.scheduler.step(
model_output=__lowercase , timestep=__lowercase , sample=__lowercase , generator=__lowercase , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] =mask[:, step, :, :mask_start]
if mask_end > 0:
SCREAMING_SNAKE_CASE__ : List[str] =mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
SCREAMING_SNAKE_CASE__ : str =1 / self.vqvae.config.scaling_factor * images
SCREAMING_SNAKE_CASE__ : int =self.vqvae.decode(__lowercase )['''sample''']
SCREAMING_SNAKE_CASE__ : List[str] =(images / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
SCREAMING_SNAKE_CASE__ : Dict =(images * 2_55).round().astype('''uint8''' )
SCREAMING_SNAKE_CASE__ : Any =list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(__lowercase , mode='''RGB''' ).convert('''L''' ) for _ in images) )
SCREAMING_SNAKE_CASE__ : Optional[int] =[self.mel.image_to_audio(__lowercase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowercase ) )
@torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse-diffuse generated images back toward noise (DDIM only)."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t**0.5 + beta_prod_t**0.5 * model_output
return sample
@staticmethod
def __magic_name__ ( __lowercase : torch.Tensor , __lowercase : torch.Tensor , __lowercase : float ) -> torch.Tensor:
SCREAMING_SNAKE_CASE__ : Optional[int] =acos(torch.dot(torch.flatten(__lowercase ) , torch.flatten(__lowercase ) ) / torch.norm(__lowercase ) / torch.norm(__lowercase ) )
        return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xa / sin(theta )
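# Added illustration (a hedged sketch, not part of the original pipeline API): the static
# method above implements spherical linear interpolation (slerp). Assuming `torch`, `acos`
# and `sin` are imported at the top of this module (they are used above), a plain-named
# standalone version reads:
def _slerp_example(x_start: torch.Tensor, x_end: torch.Tensor, alpha: float) -> torch.Tensor:
    # angle between the two flattened tensors
    theta = acos(torch.dot(torch.flatten(x_start), torch.flatten(x_end)) / torch.norm(x_start) / torch.norm(x_end))
    # weight each endpoint by the sine of its share of the angle; alpha=0 returns x_start,
    # alpha=1 returns x_end (up to floating point error)
    return sin((1 - alpha) * theta) * x_start / sin(theta) + sin(alpha * theta) * x_end / sin(theta)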
| 665 | 1 |
'''simple docstring'''
def _a( UpperCamelCase__ : list, UpperCamelCase__ : int, UpperCamelCase__ : int = 0, UpperCamelCase__ : int = 0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =right or len(UpperCamelCase__ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(UpperCamelCase__, UpperCamelCase__, left + 1, right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
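    # Added usage sketch (values are illustrative): `_a` is the masked name of the search
    # routine above; it scans inward from both ends and returns the index of `key`, or -1
    # once the bounds cross.
    print(_a([5, 3, 8, 1], 8))  # expected: 2
    print(_a([5, 3, 8, 1], 7))  # expected: -1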
| 665 |
'''simple docstring'''
from math import isqrt
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =[True] * max_number
for i in range(2, isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2, UpperCamelCase__, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Any =False
return [i for i in range(2, UpperCamelCase__ ) if is_prime[i]]
def _a( UpperCamelCase__ : int = 1_0**8 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple =calculate_prime_numbers(max_number // 2 )
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : Optional[int] =len(UpperCamelCase__ ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
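    # Added worked example (small, illustrative bound): with max_number=30 the primes below
    # 15 are [2, 3, 5, 7, 11, 13]; the two-pointer sweep counts 2*{2,3,5,7,11,13},
    # 3*{3,5,7} and 5*{5}, i.e. 10 semiprimes below 30.
    print(solution(30))  # expected: 10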
| 665 | 1 |
'''simple docstring'''
from math import isqrt
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =[True] * max_number
for i in range(2, isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2, UpperCamelCase__, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Any =False
return [i for i in range(2, UpperCamelCase__ ) if is_prime[i]]
def _a( UpperCamelCase__ : int = 1_0**8 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple =calculate_prime_numbers(max_number // 2 )
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : Optional[int] =len(UpperCamelCase__ ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 665 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = SpeechTaTokenizer
snake_case_ = False
snake_case_ = True
def __magic_name__ ( self : int ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ : Optional[Any] =SpeechTaTokenizer(__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =AddedToken('''<mask>''' , lstrip=__lowercase , rstrip=__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self : Dict , __lowercase : int ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] ='''this is a test'''
SCREAMING_SNAKE_CASE__ : int ='''this is a test'''
return input_text, output_text
def __magic_name__ ( self : List[Any] , __lowercase : int , __lowercase : Optional[Any]=False , __lowercase : Union[str, Any]=20 , __lowercase : Any=5 ) -> Any:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.get_input_output_texts(__lowercase )
SCREAMING_SNAKE_CASE__ : str =tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
SCREAMING_SNAKE_CASE__ : str =tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
return text, ids
def __magic_name__ ( self : Dict ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[int] ='''<pad>'''
SCREAMING_SNAKE_CASE__ : Optional[int] =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def __magic_name__ ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-4] , '''œ''' )
self.assertEqual(vocab_keys[-2] , '''<mask>''' )
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
self.assertEqual(len(__lowercase ) , 81 )
def __magic_name__ ( self : Dict ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def __magic_name__ ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE__ : str =self.get_tokenizers(do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Any =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
SCREAMING_SNAKE_CASE__ : int =['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.add_tokens(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , len(__lowercase ) )
self.assertEqual(__lowercase , all_size + len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__lowercase )
self.assertGreaterEqual(len(__lowercase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
SCREAMING_SNAKE_CASE__ : str ={'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
SCREAMING_SNAKE_CASE__ : int =tokenizer.add_special_tokens(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : int =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , len(__lowercase ) )
self.assertEqual(__lowercase , all_size_a + len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__lowercase )
self.assertGreaterEqual(len(__lowercase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def __magic_name__ ( self : Optional[Any] ) -> Any:
pass
def __magic_name__ ( self : List[str] ) -> List[Any]:
pass
def __magic_name__ ( self : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Dict =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.tokenize('''This is a test''' )
# fmt: off
self.assertListEqual(__lowercase , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(__lowercase )
# fmt: off
self.assertListEqual(__lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.convert_ids_to_tokens(__lowercase )
self.assertListEqual(
__lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
def __magic_name__ ( self : List[str] ) -> List[str]:
# Use custom sequence because this tokenizer does not handle numbers.
SCREAMING_SNAKE_CASE__ : List[Any] =[
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
SCREAMING_SNAKE_CASE__ : str ={
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__lowercase , )
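# Added usage sketch (hedged, outside the test harness): a minimal round trip with the
# fixture tokenizer would look roughly like the following; `a_` is the fixture model path
# defined at the top of this file, and the exact ids depend on that fixture.
#
#   tokenizer = SpeechTaTokenizer(a_)
#   ids = tokenizer.encode("this is a test", add_special_tokens=False)
#   assert tokenizer.decode(ids, clean_up_tokenization_spaces=False) == "this is a test"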
| 665 | 1 |
'''simple docstring'''
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
a_ = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
a_ = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
a_ = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a_ = 'facebook'
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
a_ = 'allenai'
def _a( UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any =dict((re.sub(R'''@@$''', '''''', UpperCamelCase__ ), v) if k.endswith('''@@''' ) else (re.sub(R'''$''', '''</w>''', UpperCamelCase__ ), v) for k, v in d.items() )
SCREAMING_SNAKE_CASE__ : List[str] ='''<s> <pad> </s> <unk>'''.split()
# restore the special tokens
for k in keep_keys:
del da[f"{k}</w>"]
SCREAMING_SNAKE_CASE__ : List[Any] =d[k] # restore
return da
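# Added illustration (hedged, made-up entries): the rewrite above converts fairseq's
# BPE-continuation convention ("@@" suffix) into the word-final "</w>" convention while
# keeping the special tokens bare, e.g.
#
#   rewrite_dict_keys({"<s>": 0, "hell@@": 1, "o": 2})
#   # -> {"<s>": 0, "hell": 1, "o</w>": 2}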
def _a( UpperCamelCase__ : str, UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
assert os.path.exists(UpperCamelCase__ )
os.makedirs(UpperCamelCase__, exist_ok=UpperCamelCase__ )
print(f"Writing results to {pytorch_dump_folder_path}" )
# handle various types of models
SCREAMING_SNAKE_CASE__ : Tuple =basename(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple =dirname(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any =fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
SCREAMING_SNAKE_CASE__ : List[Any] =cls.hub_models()
SCREAMING_SNAKE_CASE__ : List[Any] ={'''bpe''': '''fastbpe''', '''tokenizer''': '''moses'''}
SCREAMING_SNAKE_CASE__ : Dict ='''.'''
    # note: the model dump is old; fairseq later upgraded its model format and now does a
    # whole lot of rewrites and splits on the saved weights, so we can't use torch.load()
    # directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"using checkpoint {checkpoint_file}" )
SCREAMING_SNAKE_CASE__ : Any =hub_utils.from_pretrained(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, archive_map=UpperCamelCase__, **UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =vars(chkpt['''args''']['''model'''] )
SCREAMING_SNAKE_CASE__ : str =args['''source_lang''']
SCREAMING_SNAKE_CASE__ : List[Any] =args['''target_lang''']
SCREAMING_SNAKE_CASE__ : Dict =dirname(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] =basename(UpperCamelCase__ )
# dicts
SCREAMING_SNAKE_CASE__ : List[str] =os.path.join(UpperCamelCase__, f"dict.{src_lang}.txt" )
SCREAMING_SNAKE_CASE__ : List[str] =os.path.join(UpperCamelCase__, f"dict.{tgt_lang}.txt" )
SCREAMING_SNAKE_CASE__ : Tuple =Dictionary.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : str =rewrite_dict_keys(src_dict.indices )
SCREAMING_SNAKE_CASE__ : List[str] =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] =os.path.join(UpperCamelCase__, '''vocab-src.json''' )
print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records" )
with open(UpperCamelCase__, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(UpperCamelCase__, ensure_ascii=UpperCamelCase__, indent=UpperCamelCase__ ) )
    # detect whether this is a do_lower_case situation, which can be derived by checking
    # whether the source vocab contains any uppercase letters (none implies a lowercased model)
SCREAMING_SNAKE_CASE__ : Optional[int] =True
for k in src_vocab.keys():
if not k.islower():
SCREAMING_SNAKE_CASE__ : int =False
break
SCREAMING_SNAKE_CASE__ : Dict =Dictionary.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any =rewrite_dict_keys(tgt_dict.indices )
SCREAMING_SNAKE_CASE__ : Optional[int] =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[str] =os.path.join(UpperCamelCase__, '''vocab-tgt.json''' )
print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records" )
with open(UpperCamelCase__, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(UpperCamelCase__, ensure_ascii=UpperCamelCase__, indent=UpperCamelCase__ ) )
# merges_file (bpecodes)
SCREAMING_SNAKE_CASE__ : Any =os.path.join(UpperCamelCase__, VOCAB_FILES_NAMES['''merges_file'''] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
SCREAMING_SNAKE_CASE__ : Union[str, Any] =os.path.join(UpperCamelCase__, UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ):
break
with open(UpperCamelCase__, encoding='''utf-8''' ) as fin:
SCREAMING_SNAKE_CASE__ : Optional[Any] =fin.read()
SCREAMING_SNAKE_CASE__ : Any =re.sub(R''' \d+$''', '''''', UpperCamelCase__, 0, re.M ) # remove frequency number
print(f"Generating {merges_file}" )
with open(UpperCamelCase__, '''w''', encoding='''utf-8''' ) as fout:
fout.write(UpperCamelCase__ )
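    # Added note (illustrative line): the regex above strips the trailing merge frequencies
    # that fairseq stores, e.g. "t h 6542" becomes "t h", which is the format the
    # transformers BPE merges file expects.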
# model config
SCREAMING_SNAKE_CASE__ : Optional[int] =os.path.join(UpperCamelCase__, '''config.json''' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}"
SCREAMING_SNAKE_CASE__ : List[Any] ={
'''architectures''': ['''FSMTForConditionalGeneration'''],
'''model_type''': '''fsmt''',
'''activation_dropout''': args['''activation_dropout'''],
'''activation_function''': '''relu''',
'''attention_dropout''': args['''attention_dropout'''],
'''d_model''': args['''decoder_embed_dim'''],
'''dropout''': args['''dropout'''],
'''init_std''': 0.0_2,
'''max_position_embeddings''': args['''max_source_positions'''],
'''num_hidden_layers''': args['''encoder_layers'''],
'''src_vocab_size''': src_vocab_size,
'''tgt_vocab_size''': tgt_vocab_size,
'''langs''': [src_lang, tgt_lang],
'''encoder_attention_heads''': args['''encoder_attention_heads'''],
'''encoder_ffn_dim''': args['''encoder_ffn_embed_dim'''],
'''encoder_layerdrop''': args['''encoder_layerdrop'''],
'''encoder_layers''': args['''encoder_layers'''],
'''decoder_attention_heads''': args['''decoder_attention_heads'''],
'''decoder_ffn_dim''': args['''decoder_ffn_embed_dim'''],
'''decoder_layerdrop''': args['''decoder_layerdrop'''],
'''decoder_layers''': args['''decoder_layers'''],
'''bos_token_id''': 0,
'''pad_token_id''': 1,
'''eos_token_id''': 2,
'''is_encoder_decoder''': True,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_all_embeddings'''],
}
# good hparam defaults to start with
SCREAMING_SNAKE_CASE__ : Optional[int] =5
SCREAMING_SNAKE_CASE__ : Optional[Any] =False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
SCREAMING_SNAKE_CASE__ : List[str] =best_score_hparams[model_dir]['''length_penalty''']
else:
SCREAMING_SNAKE_CASE__ : List[str] =1.0
print(f"Generating {fsmt_model_config_file}" )
with open(UpperCamelCase__, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(UpperCamelCase__, ensure_ascii=UpperCamelCase__, indent=UpperCamelCase__ ) )
# tokenizer config
SCREAMING_SNAKE_CASE__ : Union[str, Any] =os.path.join(UpperCamelCase__, UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[str] ={
'''langs''': [src_lang, tgt_lang],
'''model_max_length''': 1_0_2_4,
'''do_lower_case''': do_lower_case,
}
print(f"Generating {fsmt_tokenizer_config_file}" )
with open(UpperCamelCase__, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(UpperCamelCase__, ensure_ascii=UpperCamelCase__, indent=UpperCamelCase__ ) )
# model
SCREAMING_SNAKE_CASE__ : Optional[Any] =chkpt['''models'''][0]
SCREAMING_SNAKE_CASE__ : Any =model.state_dict()
# rename keys to start with 'model.'
SCREAMING_SNAKE_CASE__ : Optional[Any] =OrderedDict(('''model.''' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
SCREAMING_SNAKE_CASE__ : Tuple =[
'''model.model''',
'''model.encoder.version''',
'''model.decoder.version''',
'''model.encoder_embed_tokens.weight''',
'''model.decoder_embed_tokens.weight''',
'''model.encoder.embed_positions._float_tensor''',
'''model.decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
model_state_dict.pop(UpperCamelCase__, UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] =FSMTConfig.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : str =FSMTForConditionalGeneration(UpperCamelCase__ )
# check that it loads ok
model_new.load_state_dict(UpperCamelCase__, strict=UpperCamelCase__ )
# save
SCREAMING_SNAKE_CASE__ : Any =os.path.join(UpperCamelCase__, UpperCamelCase__ )
print(f"Generating {pytorch_weights_dump_path}" )
torch.save(UpperCamelCase__, UpperCamelCase__ )
print('''Conversion is done!''' )
print('''\nLast step is to upload the files to s3''' )
print(f"cd {data_root}" )
print(f"transformers-cli upload {model_dir}" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 665 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , __lowercase : Optional[int] , __lowercase : str=13 , __lowercase : int=10 , __lowercase : List[Any]=3 , __lowercase : List[str]=2 , __lowercase : int=2 , __lowercase : Dict=True , __lowercase : Optional[Any]=True , __lowercase : int=32 , __lowercase : List[Any]=5 , __lowercase : Union[str, Any]=4 , __lowercase : Any=37 , __lowercase : Optional[Any]="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : Tuple=0.1 , __lowercase : Dict=10 , __lowercase : int=0.02 , __lowercase : str="divided_space_time" , __lowercase : Union[str, Any]=None , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : int =parent
SCREAMING_SNAKE_CASE__ : List[str] =batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_size
SCREAMING_SNAKE_CASE__ : List[Any] =num_channels
SCREAMING_SNAKE_CASE__ : int =patch_size
SCREAMING_SNAKE_CASE__ : Tuple =num_frames
SCREAMING_SNAKE_CASE__ : List[Any] =is_training
SCREAMING_SNAKE_CASE__ : List[str] =use_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size
SCREAMING_SNAKE_CASE__ : str =num_hidden_layers
SCREAMING_SNAKE_CASE__ : int =num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] =hidden_act
SCREAMING_SNAKE_CASE__ : Dict =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] =attention_type
SCREAMING_SNAKE_CASE__ : Union[str, Any] =initializer_range
SCREAMING_SNAKE_CASE__ : Any =scope
SCREAMING_SNAKE_CASE__ : int =num_labels
        # in TimeSformer, the total number of tokens equals num_frames * num_patches per frame + 1 CLS token
SCREAMING_SNAKE_CASE__ : List[str] =(image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : str =(num_frames) * self.num_patches_per_frame + 1
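        # Worked example with the defaults above: image_size=10 and patch_size=2 give
        # (10 // 2) ** 2 = 25 patches per frame; with num_frames=2 the sequence length is
        # 2 * 25 + 1 = 51 tokens (including the CLS token).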
def __magic_name__ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[Any] =None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : int =self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : int ) -> Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
SCREAMING_SNAKE_CASE__ : List[Any] =self.num_labels
return config
def __magic_name__ ( self : int , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[Any] ) -> int:
SCREAMING_SNAKE_CASE__ : Tuple =TimesformerModel(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str =TimesformerForVideoClassification(__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase )
# verify the logits shape
SCREAMING_SNAKE_CASE__ : Tuple =torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __lowercase )
def __magic_name__ ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =config_and_inputs
SCREAMING_SNAKE_CASE__ : Any ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
snake_case_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
snake_case_ = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def __magic_name__ ( self : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Dict =TimesformerModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] =ConfigTester(
self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 )
def __magic_name__ ( self : str , __lowercase : Dict , __lowercase : str , __lowercase : Optional[int]=False ) -> int:
SCREAMING_SNAKE_CASE__ : str =copy.deepcopy(__lowercase )
if return_labels:
if model_class in get_values(__lowercase ):
SCREAMING_SNAKE_CASE__ : Any =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
return inputs_dict
def __magic_name__ ( self : List[Any] ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='''TimeSformer does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ) -> Optional[int]:
pass
def __magic_name__ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str =model_class(__lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Any =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) )
def __magic_name__ ( self : Any ) -> Any:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase )
SCREAMING_SNAKE_CASE__ : str =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : List[str] =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Dict =['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def __magic_name__ ( self : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__lowercase )
@slow
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> List[str]:
if not self.has_attentions:
pass
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] =True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.seq_length
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.num_frames
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
SCREAMING_SNAKE_CASE__ : str =False
SCREAMING_SNAKE_CASE__ : Tuple =True
SCREAMING_SNAKE_CASE__ : Dict =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also works using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE__ : List[Any] =True
SCREAMING_SNAKE_CASE__ : List[str] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE__ : Optional[int] =True
SCREAMING_SNAKE_CASE__ : Union[str, Any] =True
SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) )
self.assertEqual(out_len + 1 , len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : List[str] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def __magic_name__ ( self : Tuple ) -> List[Any]:
def check_hidden_states_output(__lowercase : Tuple , __lowercase : Dict , __lowercase : Optional[int] ):
SCREAMING_SNAKE_CASE__ : List[Any] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : int =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__lowercase ) , __lowercase )
SCREAMING_SNAKE_CASE__ : int =self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Tuple =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
            # check that output_hidden_states also works using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : List[str] =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''', filename='''eating_spaghetti.npy''', repo_type='''dataset''' )
SCREAMING_SNAKE_CASE__ : Any =np.load(UpperCamelCase__ )
return list(UpperCamelCase__ )
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : Any ) -> List[str]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : int =TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to(
__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =self.default_image_processor
SCREAMING_SNAKE_CASE__ : Tuple =prepare_video()
SCREAMING_SNAKE_CASE__ : Any =image_processor(video[:8] , return_tensors='''pt''' ).to(__lowercase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] =model(**__lowercase )
# verify the logits
SCREAMING_SNAKE_CASE__ : List[str] =torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , __lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 ) )
| 665 | 1 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
a_ = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
a_ = get_tests_dir('fixtures/vocab.json')
a_ = get_tests_dir('fixtures')
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case_ = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
def __magic_name__ ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =0
def __magic_name__ ( self : List[str] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(__lowercase , __lowercase )
def __magic_name__ ( self : str ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : int =WavaVecaConfig()
SCREAMING_SNAKE_CASE__ : Tuple =AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(__lowercase )
processor.save_pretrained(__lowercase )
SCREAMING_SNAKE_CASE__ : str =AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def __magic_name__ ( self : Optional[int] ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__lowercase , os.path.join(__lowercase , __lowercase ) )
copyfile(__lowercase , os.path.join(__lowercase , '''vocab.json''' ) )
SCREAMING_SNAKE_CASE__ : List[str] =AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def __magic_name__ ( self : Any ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[str] =WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : str =AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
SCREAMING_SNAKE_CASE__ : List[Any] =WavaVecaProcessor(__lowercase , __lowercase )
# save in new folder
processor.save_pretrained(__lowercase )
# drop `processor_class` in tokenizer
with open(os.path.join(__lowercase , __lowercase ) , '''r''' ) as f:
SCREAMING_SNAKE_CASE__ : str =json.load(__lowercase )
config_dict.pop('''processor_class''' )
with open(os.path.join(__lowercase , __lowercase ) , '''w''' ) as f:
f.write(json.dumps(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[int] =AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def __magic_name__ ( self : Tuple ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Tuple =WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : int =AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
SCREAMING_SNAKE_CASE__ : int =WavaVecaProcessor(__lowercase , __lowercase )
# save in new folder
processor.save_pretrained(__lowercase )
# drop `processor_class` in feature extractor
with open(os.path.join(__lowercase , __lowercase ) , '''r''' ) as f:
SCREAMING_SNAKE_CASE__ : List[Any] =json.load(__lowercase )
config_dict.pop('''processor_class''' )
with open(os.path.join(__lowercase , __lowercase ) , '''w''' ) as f:
f.write(json.dumps(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def __magic_name__ ( self : Optional[Any] ) -> int:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : int =WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(__lowercase )
# copy relevant files
copyfile(__lowercase , os.path.join(__lowercase , '''vocab.json''' ) )
            # create empty sample processor
with open(os.path.join(__lowercase , __lowercase ) , '''w''' ) as f:
f.write('''{}''' )
SCREAMING_SNAKE_CASE__ : str =AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] =AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowercase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] =AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowercase )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
SCREAMING_SNAKE_CASE__ : Optional[int] =processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
SCREAMING_SNAKE_CASE__ : Dict =processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ : List[str] =AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowercase , use_fast=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __magic_name__ ( self : str ) -> Union[str, Any]:
try:
AutoConfig.register('''custom''' , __lowercase )
AutoFeatureExtractor.register(__lowercase , __lowercase )
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase )
AutoProcessor.register(__lowercase , __lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowercase ):
AutoProcessor.register(__lowercase , __lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE__ : Any =CustomFeatureExtractor.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : Any =os.path.join(__lowercase , '''vocab.txt''' )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =CustomTokenizer(__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =CustomProcessor(__lowercase , __lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __magic_name__ ( self : str ) -> Dict:
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = False
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = False
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """AutoFeatureExtractor"""
snake_case_ = """AutoTokenizer"""
snake_case_ = False
try:
AutoConfig.register('''custom''' , __lowercase )
AutoFeatureExtractor.register(__lowercase , __lowercase )
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase )
AutoProcessor.register(__lowercase , __lowercase )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE__ : Tuple =AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE__ : Any =AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowercase )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE__ : str =AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowercase )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __magic_name__ ( self : List[Any] ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[int] =AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __magic_name__ ( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] =AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case_ = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def __magic_name__ ( cls : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[int] =TOKEN
HfFolder.save_token(__lowercase )
@classmethod
def __magic_name__ ( cls : List[str] ) -> Optional[Any]:
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __magic_name__ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ : Any =WavaVecaProcessor.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__lowercase , '''test-processor''' ) , push_to_hub=__lowercase , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : int =WavaVecaProcessor.from_pretrained(F"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__lowercase , getattr(new_processor.feature_extractor , __lowercase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __magic_name__ ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE__ : Any =WavaVecaProcessor.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__lowercase , '''test-processor-org''' ) , push_to_hub=__lowercase , use_auth_token=self._token , organization='''valid_org''' , )
SCREAMING_SNAKE_CASE__ : Any =WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__lowercase , getattr(new_processor.feature_extractor , __lowercase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __magic_name__ ( self : Union[str, Any] ) -> Dict:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE__ : Any =CustomFeatureExtractor.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : List[str] =os.path.join(__lowercase , '''vocab.txt''' )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : str =CustomTokenizer(__lowercase )
SCREAMING_SNAKE_CASE__ : int =CustomProcessor(__lowercase , __lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"{USER}/test-dynamic-processor" , token=self._token )
SCREAMING_SNAKE_CASE__ : List[str] =Repository(__lowercase , clone_from=F"{USER}/test-dynamic-processor" , token=self._token )
processor.save_pretrained(__lowercase )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) ) as f:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =json.load(__lowercase )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__lowercase , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(__lowercase , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(__lowercase , '''custom_processing.py''' ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=__lowercase )
        # Can't make an isinstance check because new_processor comes from the CustomProcessor class of a dynamically loaded module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
| 665 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
a_ = {
'bert-base-uncased': 5_1_2,
'bert-large-uncased': 5_1_2,
'bert-base-cased': 5_1_2,
'bert-large-cased': 5_1_2,
'bert-base-multilingual-uncased': 5_1_2,
'bert-base-multilingual-cased': 5_1_2,
'bert-base-chinese': 5_1_2,
'bert-base-german-cased': 5_1_2,
'bert-large-uncased-whole-word-masking': 5_1_2,
'bert-large-cased-whole-word-masking': 5_1_2,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-base-cased-finetuned-mrpc': 5_1_2,
'bert-base-german-dbmdz-cased': 5_1_2,
'bert-base-german-dbmdz-uncased': 5_1_2,
'TurkuNLP/bert-base-finnish-cased-v1': 5_1_2,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_1_2,
'wietsedv/bert-base-dutch-cased': 5_1_2,
}
a_ = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_INIT_CONFIGURATION
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = BertTokenizer
def __init__( self : int , __lowercase : Union[str, Any]=None , __lowercase : Tuple=None , __lowercase : str=True , __lowercase : Optional[Any]="[UNK]" , __lowercase : Tuple="[SEP]" , __lowercase : Any="[PAD]" , __lowercase : List[Any]="[CLS]" , __lowercase : Union[str, Any]="[MASK]" , __lowercase : Tuple=True , __lowercase : str=None , **__lowercase : Any , ) -> Optional[Any]:
super().__init__(
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , tokenize_chinese_chars=__lowercase , strip_accents=__lowercase , **__lowercase , )
SCREAMING_SNAKE_CASE__ : str =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , __lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , __lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , __lowercase ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE__ : int =getattr(__lowercase , normalizer_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE__ : Any =do_lower_case
SCREAMING_SNAKE_CASE__ : Any =strip_accents
SCREAMING_SNAKE_CASE__ : Dict =tokenize_chinese_chars
SCREAMING_SNAKE_CASE__ : Union[str, Any] =normalizer_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =do_lower_case
def __magic_name__ ( self : int , __lowercase : Optional[Any] , __lowercase : Union[str, Any]=None ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __magic_name__ ( self : Union[str, Any] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ : List[str] =[self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__ ( self : List[Any] , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self._tokenizer.model.save(__lowercase , name=__lowercase )
return tuple(__lowercase )
| 665 | 1 |
'''simple docstring'''
def _a( UpperCamelCase__ : int, UpperCamelCase__ : int ):
'''simple docstring'''
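    # NAND is the negation of AND: the output is 1 unless both inputs are 1,
    # i.e. whenever at least one of the two inputs is 0.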
return int((input_a, input_a).count(0 ) != 0 )
def _a( ):
'''simple docstring'''
assert nand_gate(0, 0 ) == 1
assert nand_gate(0, 1 ) == 1
assert nand_gate(1, 0 ) == 1
assert nand_gate(1, 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 665 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
a_ = False
a_ = False
def _a( UpperCamelCase__ : Namespace ):
'''simple docstring'''
return TrainCommand(UpperCamelCase__ )
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
@staticmethod
def __magic_name__ ( __lowercase : ArgumentParser ) -> Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' )
train_parser.add_argument(
'''--train_data''' , type=__lowercase , required=__lowercase , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=__lowercase , default=0 , help='''Column of the dataset csv file with example labels.''' )
train_parser.add_argument(
'''--column_text''' , type=__lowercase , default=1 , help='''Column of the dataset csv file with example texts.''' )
train_parser.add_argument(
'''--column_id''' , type=__lowercase , default=2 , help='''Column of the dataset csv file with example ids.''' )
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' )
train_parser.add_argument('''--validation_data''' , type=__lowercase , default='''''' , help='''path to validation dataset.''' )
train_parser.add_argument(
'''--validation_split''' , type=__lowercase , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
train_parser.add_argument('''--output''' , type=__lowercase , default='''./''' , help='''path to saved the trained model.''' )
train_parser.add_argument(
'''--task''' , type=__lowercase , default='''text_classification''' , help='''Task to train the model on.''' )
train_parser.add_argument(
'''--model''' , type=__lowercase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' )
train_parser.add_argument('''--train_batch_size''' , type=__lowercase , default=32 , help='''Batch size for training.''' )
train_parser.add_argument('''--valid_batch_size''' , type=__lowercase , default=64 , help='''Batch size for validation.''' )
train_parser.add_argument('''--learning_rate''' , type=__lowercase , default=3e-5 , help='''Learning rate.''' )
train_parser.add_argument('''--adam_epsilon''' , type=__lowercase , default=1e-08 , help='''Epsilon for Adam optimizer.''' )
train_parser.set_defaults(func=__lowercase )
def __init__( self : Tuple , __lowercase : Namespace ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Tuple =logging.get_logger('''transformers-cli/training''' )
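        # prefer the TensorFlow backend when it is available, otherwise fall back to PyTorch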
SCREAMING_SNAKE_CASE__ : int ='''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output , exist_ok=__lowercase )
SCREAMING_SNAKE_CASE__ : Any =args.output
SCREAMING_SNAKE_CASE__ : str =args.column_label
SCREAMING_SNAKE_CASE__ : List[Any] =args.column_text
SCREAMING_SNAKE_CASE__ : Tuple =args.column_id
self.logger.info(F"Loading {args.task} pipeline for {args.model}" )
if args.task == "text_classification":
SCREAMING_SNAKE_CASE__ : List[str] =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"Loading dataset from {args.train_data}" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =None
if args.validation_data:
self.logger.info(F"Loading validation dataset from {args.validation_data}" )
SCREAMING_SNAKE_CASE__ : List[Any] =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =args.validation_split
SCREAMING_SNAKE_CASE__ : List[Any] =args.train_batch_size
SCREAMING_SNAKE_CASE__ : Any =args.valid_batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =args.learning_rate
SCREAMING_SNAKE_CASE__ : int =args.adam_epsilon
def __magic_name__ ( self : Any ) -> str:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __magic_name__ ( self : Optional[int] ) -> Tuple:
raise NotImplementedError
def __magic_name__ ( self : Dict ) -> List[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 665 | 1 |
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
def __init__( self : List[str] , *__lowercase : Any , __lowercase : Optional[Any]=None , __lowercase : int=None , **__lowercase : str ) -> List[str]:
super().__init__(*__lowercase , **__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =eval_examples
SCREAMING_SNAKE_CASE__ : Dict =post_process_function
def __magic_name__ ( self : Optional[int] , __lowercase : Optional[Dataset] = None , __lowercase : str=None , __lowercase : Optional[List[str]] = None , __lowercase : str = "eval" , **__lowercase : List[Any] , ) -> Dict[str, float]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =gen_kwargs.copy()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
)
SCREAMING_SNAKE_CASE__ : List[Any] =(
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
)
SCREAMING_SNAKE_CASE__ : List[str] =gen_kwargs
SCREAMING_SNAKE_CASE__ : Any =self.eval_dataset if eval_dataset is None else eval_dataset
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.get_eval_dataloader(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation; we will do it in the loop here.
SCREAMING_SNAKE_CASE__ : int =self.compute_metrics
SCREAMING_SNAKE_CASE__ : Any =None
SCREAMING_SNAKE_CASE__ : str =time.time()
SCREAMING_SNAKE_CASE__ : List[Any] =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE__ : Tuple =eval_loop(
__lowercase , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowercase , metric_key_prefix=__lowercase , )
finally:
SCREAMING_SNAKE_CASE__ : Any =compute_metrics
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.args.eval_batch_size * self.args.world_size
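        # exclude JIT compilation time from the measured wall clock so the
        # reported samples/second reflect steady-state evaluation throughput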
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
__lowercase , __lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
SCREAMING_SNAKE_CASE__ : List[str] =self.post_process_function(__lowercase , __lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Any =self.compute_metrics(__lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
SCREAMING_SNAKE_CASE__ : str =metrics.pop(__lowercase )
metrics.update(output.metrics )
else:
SCREAMING_SNAKE_CASE__ : Any =output.metrics
if self.args.should_log:
            # Only the main node logs the results by default
self.log(__lowercase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
SCREAMING_SNAKE_CASE__ : List[str] =self.callback_handler.on_evaluate(self.args , self.state , self.control , __lowercase )
return metrics
def __magic_name__ ( self : Any , __lowercase : Any , __lowercase : List[str] , __lowercase : Optional[int]=None , __lowercase : str = "test" , **__lowercase : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : str =gen_kwargs.copy()
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.get_test_dataloader(__lowercase )
        # Temporarily disable metric computation; we will do it in the loop here.
SCREAMING_SNAKE_CASE__ : int =self.compute_metrics
SCREAMING_SNAKE_CASE__ : int =None
SCREAMING_SNAKE_CASE__ : Union[str, Any] =time.time()
SCREAMING_SNAKE_CASE__ : List[str] =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE__ : Optional[Any] =eval_loop(
__lowercase , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowercase , metric_key_prefix=__lowercase , )
finally:
SCREAMING_SNAKE_CASE__ : Tuple =compute_metrics
SCREAMING_SNAKE_CASE__ : str =self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
__lowercase , __lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
SCREAMING_SNAKE_CASE__ : Tuple =self.post_process_function(__lowercase , __lowercase , __lowercase , '''predict''' )
SCREAMING_SNAKE_CASE__ : Tuple =self.compute_metrics(__lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
SCREAMING_SNAKE_CASE__ : Dict =metrics.pop(__lowercase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__lowercase )
| 665 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = KandinskyVaaImgaImgPipeline
snake_case_ = ["""image_embeds""", """negative_image_embeds""", """image"""]
snake_case_ = [
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
snake_case_ = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
snake_case_ = False
@property
def __magic_name__ ( self : List[str] ) -> Tuple:
return 32
@property
def __magic_name__ ( self : List[str] ) -> str:
return 32
@property
def __magic_name__ ( self : Any ) -> Optional[int]:
return self.time_input_dim
@property
def __magic_name__ ( self : List[Any] ) -> int:
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Tuple ) -> Optional[int]:
return 1_00
@property
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''in_channels''': 4,
            # Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE__ : Optional[int] =UNetaDConditionModel(**__lowercase )
return model
@property
def __magic_name__ ( self : Dict ) -> Any:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __magic_name__ ( self : Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] =VQModel(**self.dummy_movq_kwargs )
return model
def __magic_name__ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[str] =self.dummy_unet
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_movq
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''num_train_timesteps''': 10_00,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE__ : str =DDIMScheduler(**__lowercase )
SCREAMING_SNAKE_CASE__ : Any ={
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __magic_name__ ( self : str , __lowercase : Optional[Any] , __lowercase : Any=0 ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowercase )
# create init_image
SCREAMING_SNAKE_CASE__ : Optional[Any] =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ : Any =Image.fromarray(np.uinta(__lowercase ) ).convert('''RGB''' ).resize((2_56, 2_56) )
if str(__lowercase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ : Dict =torch.manual_seed(__lowercase )
else:
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
SCREAMING_SNAKE_CASE__ : str ={
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __magic_name__ ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] ='''cpu'''
SCREAMING_SNAKE_CASE__ : Tuple =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =output.images
SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipe(
**self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : List[str] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : Tuple =np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : int ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : str =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE__ : List[Any] ='''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE__ : Optional[int] =KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =KandinskyVaaImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Dict =pipeline.to(__lowercase )
pipeline.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =pipe_prior(
__lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ : int =output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__lowercase , __lowercase )
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
def _a( UpperCamelCase__ : list[int | str] ):
'''simple docstring'''
create_state_space_tree(UpperCamelCase__, [], 0, [0 for i in range(len(UpperCamelCase__ ) )] )
def _a( UpperCamelCase__ : list[int | str], UpperCamelCase__ : list[int | str], UpperCamelCase__ : int, UpperCamelCase__ : list[int], ):
'''simple docstring'''
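    # Backtracking: place each unused element at position `index`, recurse on
    # the remaining positions, then undo the choice so sibling branches start
    # from a clean state.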
if index == len(UpperCamelCase__ ):
print(UpperCamelCase__ )
return
for i in range(len(UpperCamelCase__ ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
SCREAMING_SNAKE_CASE__ : List[Any] =True
create_state_space_tree(UpperCamelCase__, UpperCamelCase__, index + 1, UpperCamelCase__ )
current_sequence.pop()
SCREAMING_SNAKE_CASE__ : Tuple =False
a_ = [3, 1, 2, 4]
generate_all_permutations(sequence)
a_ = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 665 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
a_ = TypeVar('T')
class __SCREAMING_SNAKE_CASE ( Generic[T] ):
snake_case_ = 42 # Cache store of keys
snake_case_ = 42 # References of the keys in cache
snake_case_ = 10 # Maximum capacity of cache
def __init__( self : Dict , __lowercase : int ) -> None:
SCREAMING_SNAKE_CASE__ : Any =deque()
SCREAMING_SNAKE_CASE__ : str =set()
if not n:
SCREAMING_SNAKE_CASE__ : Optional[Any] =sys.maxsize
elif n < 0:
raise ValueError('''n should be an integer greater than 0.''' )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =n
def __magic_name__ ( self : List[str] , __lowercase : T ) -> None:
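        # Refer to a page: on a miss, evict the least recently used key when the
        # cache is full; either way, move the key to the front (the most
        # recently used position).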
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
SCREAMING_SNAKE_CASE__ : int =self.dq_store.pop()
self.key_reference.remove(__lowercase )
else:
self.dq_store.remove(__lowercase )
self.dq_store.appendleft(__lowercase )
self.key_reference.add(__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> None:
for k in self.dq_store:
print(__lowercase )
def __repr__( self : List[Any] ) -> str:
return F"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 665 | 1 |
'''simple docstring'''
def _a( UpperCamelCase__ : str ):
'''simple docstring'''
return "".join(chr(ord(UpperCamelCase__ ) - 3_2 ) if '''a''' <= char <= '''z''' else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 665 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
a_ = list[list[float | int]]
def _a( UpperCamelCase__ : Matrix, UpperCamelCase__ : Matrix ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(size + 1 )] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : float
for row in range(UpperCamelCase__ ):
for col in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Tuple =matrix[row][col]
SCREAMING_SNAKE_CASE__ : Optional[int] =vector[row][0]
SCREAMING_SNAKE_CASE__ : Any =0
SCREAMING_SNAKE_CASE__ : Union[str, Any] =0
while row < size and col < size:
# pivoting
SCREAMING_SNAKE_CASE__ : Any =max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCamelCase__, UpperCamelCase__ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[pivot_row], augmented[row]
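        # forward elimination: subtract multiples of the pivot row to zero out
        # every entry below the pivot in this column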
for rowa in range(row + 1, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] =augmented[rowa][col] / augmented[row][col]
SCREAMING_SNAKE_CASE__ : Tuple =0
for cola in range(col + 1, size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1, UpperCamelCase__ ):
for row in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[row][col] / augmented[col][col]
for cola in range(UpperCamelCase__, size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row], 1_0 )] for row in range(UpperCamelCase__ )
]
def _a( UpperCamelCase__ : list[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(UpperCamelCase__ )] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Matrix =[[0] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Matrix
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
for x_val, y_val in enumerate(UpperCamelCase__ ):
for col in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] =(x_val + 1) ** (size - col - 1)
SCREAMING_SNAKE_CASE__ : Dict =y_val
SCREAMING_SNAKE_CASE__ : Optional[int] =solve(UpperCamelCase__, UpperCamelCase__ )
def interpolated_func(UpperCamelCase__ : int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(UpperCamelCase__ ) )
return interpolated_func
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**1_0
)
def _a( UpperCamelCase__ : Callable[[int], int] = question_function, UpperCamelCase__ : int = 1_0 ):
'''simple docstring'''
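    # Project Euler problem 101: fit an optimum polynomial to each prefix of the
    # sequence and sum the first incorrect terms (FITs) of those fits.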
SCREAMING_SNAKE_CASE__ : list[int] =[func(UpperCamelCase__ ) for x_val in range(1, order + 1 )]
SCREAMING_SNAKE_CASE__ : list[Callable[[int], int]] =[
interpolate(data_points[:max_coeff] ) for max_coeff in range(1, order + 1 )
]
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : Callable[[int], int]
SCREAMING_SNAKE_CASE__ : int
for poly in polynomials:
SCREAMING_SNAKE_CASE__ : Any =1
while func(UpperCamelCase__ ) == poly(UpperCamelCase__ ):
x_val += 1
ret += poly(UpperCamelCase__ )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 665 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def _a( UpperCamelCase__ : Any, UpperCamelCase__ : Tuple=1_0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[]
for _ in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : Union[str, Any]=1_0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str =[]
for step in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
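            # halfway through, round-trip the scheduler state through disk to
            # check that saving and reloading does not perturb the schedule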
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : str =os.path.join(UpperCamelCase__, '''schedule.bin''' )
torch.save(scheduler.state_dict(), UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Dict =torch.load(UpperCamelCase__ )
scheduler.load_state_dict(UpperCamelCase__ )
return lrs
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : Optional[int] , __lowercase : Union[str, Any] , __lowercase : Optional[int] , __lowercase : List[str] ) -> int:
self.assertEqual(len(__lowercase ) , len(__lowercase ) )
for a, b in zip(__lowercase , __lowercase ):
self.assertAlmostEqual(__lowercase , __lowercase , delta=__lowercase )
def __magic_name__ ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.tensor([0.1, -0.2, -0.1] , requires_grad=__lowercase )
SCREAMING_SNAKE_CASE__ : Any =torch.tensor([0.4, 0.2, -0.5] )
SCREAMING_SNAKE_CASE__ : Dict =nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
SCREAMING_SNAKE_CASE__ : Optional[int] =AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(1_00 ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] =criterion(__lowercase , __lowercase )
loss.backward()
optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors; we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def __magic_name__ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[str] =torch.tensor([0.1, -0.2, -0.1] , requires_grad=__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.tensor([0.4, 0.2, -0.5] )
SCREAMING_SNAKE_CASE__ : List[str] =nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
SCREAMING_SNAKE_CASE__ : Dict =Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=__lowercase , weight_decay=0.0 , relative_step=__lowercase , scale_parameter=__lowercase , warmup_init=__lowercase , )
for _ in range(10_00 ):
SCREAMING_SNAKE_CASE__ : List[Any] =criterion(__lowercase , __lowercase )
loss.backward()
optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors; we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case_ = nn.Linear(50 , 50 ) if is_torch_available() else None
snake_case_ = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
snake_case_ = 10
def __magic_name__ ( self : List[Any] , __lowercase : Optional[Any] , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : Optional[int]=None ) -> Union[str, Any]:
self.assertEqual(len(__lowercase ) , len(__lowercase ) )
for a, b in zip(__lowercase , __lowercase ):
self.assertAlmostEqual(__lowercase , __lowercase , delta=__lowercase , msg=__lowercase )
def __magic_name__ ( self : List[Any] ) -> str:
SCREAMING_SNAKE_CASE__ : int ={'''num_warmup_steps''': 2, '''num_training_steps''': 10}
        # schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
SCREAMING_SNAKE_CASE__ : Any ={
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =data
SCREAMING_SNAKE_CASE__ : List[Any] =scheduler_func(self.optimizer , **__lowercase )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
SCREAMING_SNAKE_CASE__ : List[str] =unwrap_schedule(__lowercase , self.num_steps )
self.assertListAlmostEqual(
__lowercase , __lowercase , tol=1e-2 , msg=F"failed for {scheduler_func} in normal scheduler" , )
SCREAMING_SNAKE_CASE__ : str =scheduler_func(self.optimizer , **__lowercase )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(__lowercase ) # wrap to test picklability of the schedule
SCREAMING_SNAKE_CASE__ : Union[str, Any] =unwrap_and_save_reload_schedule(__lowercase , self.num_steps )
self.assertListEqual(__lowercase , __lowercase , msg=F"failed for {scheduler_func} in save and reload" )
class __SCREAMING_SNAKE_CASE :
def __init__( self : str , __lowercase : Any ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : str =fn
def __call__( self : Union[str, Any] , *__lowercase : Any , **__lowercase : Optional[Any] ) -> int:
return self.fn(*__lowercase , **__lowercase )
@classmethod
def __magic_name__ ( self : Tuple , __lowercase : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =list(map(self , scheduler.lr_lambdas ) )
| 665 |
'''simple docstring'''
def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =0
SCREAMING_SNAKE_CASE__ : Union[str, Any] =len(UpperCamelCase__ ) - 1
while left <= right:
        # avoid division by zero during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
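        # probe position from linear interpolation between the endpoint values:
        # point = left + (item - A[left]) * (right - left) // (A[right] - A[left])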
SCREAMING_SNAKE_CASE__ : Union[str, Any] =left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase__ ):
return None
SCREAMING_SNAKE_CASE__ : Optional[int] =sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =left
SCREAMING_SNAKE_CASE__ : Optional[Any] =point
elif point > right:
SCREAMING_SNAKE_CASE__ : Optional[int] =right
SCREAMING_SNAKE_CASE__ : Tuple =point
else:
if item < current_item:
SCREAMING_SNAKE_CASE__ : str =point - 1
else:
SCREAMING_SNAKE_CASE__ : Tuple =point + 1
return None
def _a( UpperCamelCase__ : List[str], UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
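    # same linear-interpolation probe as the iterative version above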
SCREAMING_SNAKE_CASE__ : Dict =left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase__ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
elif point > right:
return interpolation_search_by_recursion(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, point - 1 )
else:
return interpolation_search_by_recursion(
UpperCamelCase__, UpperCamelCase__, point + 1, UpperCamelCase__ )
def _a( UpperCamelCase__ : Dict ):
'''simple docstring'''
if collection != sorted(UpperCamelCase__ ):
raise ValueError('''Collection must be ascending sorted''' )
return True
if __name__ == "__main__":
import sys
a_ = 0
if debug == 1:
a_ = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('Sequence must be ascending sorted to apply interpolation search')
a_ = 6_7
a_ = interpolation_search(collection, target)
if result is not None:
print(F'''{target} found at positions: {result}''')
else:
print('Not found')
| 665 | 1 |
'''simple docstring'''
def _a( UpperCamelCase__ : list[list[float]] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : list[list[float]] =[]
for data in source_data:
for i, el in enumerate(UpperCamelCase__ ):
if len(UpperCamelCase__ ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(UpperCamelCase__ ) )
return data_lists
def _a( UpperCamelCase__ : list[list[float]], UpperCamelCase__ : list[int] ):
'''simple docstring'''
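    # min-max normalize every column to [0, 1]; the weight selects the direction
    # (0 = lower raw values are better, 1 = higher raw values are better)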
SCREAMING_SNAKE_CASE__ : list[list[float]] =[]
for dlist, weight in zip(UpperCamelCase__, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : List[Any] =min(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] =max(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : list[float] =[]
        # for weight 0, the score is 1 - the normalized value
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
SCREAMING_SNAKE_CASE__ : List[str] =f"Invalid weight of {weight:f} provided"
raise ValueError(UpperCamelCase__ )
score_lists.append(UpperCamelCase__ )
return score_lists
def _a( UpperCamelCase__ : list[list[float]] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : list[float] =[0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : List[Any] =final_scores[j] + ele
return final_scores
def _a( UpperCamelCase__ : list[list[float]], UpperCamelCase__ : list[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any =get_data(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =calculate_each_score(UpperCamelCase__, UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] =generate_final_scores(UpperCamelCase__ )
# append scores to source data
for i, ele in enumerate(UpperCamelCase__ ):
source_data[i].append(UpperCamelCase__ )
return source_data
| 665 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def __magic_name__ ( *__lowercase : int , **__lowercase : Optional[Any] ) -> Optional[Any]:
pass
@is_pipeline_test
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@require_torch
def __magic_name__ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
SCREAMING_SNAKE_CASE__ : List[str] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] )
        # The floating-point scores are so close that we run into floating-point error,
        # so the order is not guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(__lowercase ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
SCREAMING_SNAKE_CASE__ : int =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
] , )
@require_tf
def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
SCREAMING_SNAKE_CASE__ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Tuple =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
] , )
@slow
@require_torch
def __magic_name__ ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE__ : Any =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : List[str] =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def __magic_name__ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ : str =pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE__ : Optional[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
| 665 | 1 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class __SCREAMING_SNAKE_CASE ( yaml.SafeLoader ):
def __magic_name__ ( self : int , __lowercase : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : List[str] =[self.constructed_objects[key_node] for key_node, _ in node.value]
SCREAMING_SNAKE_CASE__ : Any =[tuple(__lowercase ) if isinstance(__lowercase , __lowercase ) else key for key in keys]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =Counter(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =[key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F"Got duplicate yaml keys: {duplicate_keys}" )
def __magic_name__ ( self : int , __lowercase : List[str] , __lowercase : Optional[Any]=False ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Dict =super().construct_mapping(__lowercase , deep=__lowercase )
self._check_no_duplicates_on_constructed_node(__lowercase )
return mapping
def _a( UpperCamelCase__ : str ):
'''simple docstring'''
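    # a README's YAML metadata block is delimited by an opening '---' on the
    # first line and a closing '---'; split it off and return
    # (yaml_block, remaining_markdown)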
SCREAMING_SNAKE_CASE__ : List[Any] =list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
SCREAMING_SNAKE_CASE__ : str =full_content[1:].index('''---''' ) + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''\n'''.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(UpperCamelCase__ )
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
# class attributes
snake_case_ = {"""train_eval_index"""} # train-eval-index in the YAML metadata
@classmethod
def __magic_name__ ( cls : str , __lowercase : Path ) -> "DatasetMetadata":
with open(__lowercase , encoding='''utf-8''' ) as readme_file:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =_split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(__lowercase )
else:
return cls()
def __magic_name__ ( self : Optional[int] , __lowercase : Path ) -> Optional[Any]:
if path.exists():
with open(__lowercase , encoding='''utf-8''' ) as readme_file:
SCREAMING_SNAKE_CASE__ : List[Any] =readme_file.read()
else:
SCREAMING_SNAKE_CASE__ : Tuple =None
SCREAMING_SNAKE_CASE__ : Dict =self._to_readme(__lowercase )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as readme_file:
readme_file.write(__lowercase )
def __magic_name__ ( self : int , __lowercase : Optional[str] = None ) -> str:
if readme_content is not None:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =_split_yaml_from_readme(__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''---\n''' + self.to_yaml_string() + '''---\n''' + content
else:
SCREAMING_SNAKE_CASE__ : Dict ='''---\n''' + self.to_yaml_string() + '''---\n'''
return full_content
@classmethod
def __magic_name__ ( cls : Any , __lowercase : str ) -> "DatasetMetadata":
SCREAMING_SNAKE_CASE__ : Optional[int] =yaml.load(__lowercase , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
SCREAMING_SNAKE_CASE__ : Optional[int] ={
(key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**__lowercase )
def __magic_name__ ( self : Dict ) -> str:
return yaml.safe_dump(
{
(key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=__lowercase , allow_unicode=__lowercase , encoding='''utf-8''' , ).decode('''utf-8''' )
a_ = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
a_ = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
ap.add_argument('readme_filepath')
a_ = ap.parse_args()
a_ = Path(args.readme_filepath)
a_ = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 665 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case_ = JukeboxTokenizer
snake_case_ = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
def __magic_name__ ( self : Optional[int] ) -> str:
import torch
SCREAMING_SNAKE_CASE__ : List[str] =JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
SCREAMING_SNAKE_CASE__ : str =tokenizer(**self.metas )['''input_ids''']
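# The tokenizer emits one token sequence per Jukebox prior level; only the first
# sequence carries the full lyric tokens, while the lower levels keep just the short
# metadata prefix (visible in the expected tensors below).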
# fmt: off
SCREAMING_SNAKE_CASE__ : str =[
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def __magic_name__ ( self : Any ) -> List[str]:
import torch
SCREAMING_SNAKE_CASE__ : int =JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer(**self.metas )['''input_ids''']
# fmt: off
SCREAMING_SNAKE_CASE__ : Optional[int] =[
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 665 | 1 |
'''simple docstring'''
def _a( UpperCamelCase__ : int = 1_0**1_2 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] =1
SCREAMING_SNAKE_CASE__ : str =0
SCREAMING_SNAKE_CASE__ : int =1
SCREAMING_SNAKE_CASE__ : Tuple =1
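# Each pass advances to the next convergent of sqrt(2) satisfying the Pell-type
# identity numerator**2 - 2 * denominator**2 == -1; the loop stops at the first
# numerator exceeding 2 * min_total - 1 and recovers the answer from the denominator.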
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(F'''{solution() = }''')
| 665 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """gpt_neox"""
def __init__( self : List[Any] , __lowercase : Union[str, Any]=5_04_32 , __lowercase : int=61_44 , __lowercase : Tuple=44 , __lowercase : List[str]=64 , __lowercase : str=2_45_76 , __lowercase : Dict="gelu" , __lowercase : Tuple=0.25 , __lowercase : Tuple=1_00_00 , __lowercase : Tuple=0.0 , __lowercase : str=0.0 , __lowercase : List[Any]=0.1 , __lowercase : Dict=20_48 , __lowercase : Any=0.02 , __lowercase : Dict=1e-5 , __lowercase : List[Any]=True , __lowercase : str=0 , __lowercase : Optional[Any]=2 , __lowercase : Tuple=False , __lowercase : List[Any]=True , __lowercase : Optional[Any]=None , **__lowercase : Any , ) -> Dict:
super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =max_position_embeddings
SCREAMING_SNAKE_CASE__ : Any =hidden_size
SCREAMING_SNAKE_CASE__ : str =num_hidden_layers
SCREAMING_SNAKE_CASE__ : Any =num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] =intermediate_size
SCREAMING_SNAKE_CASE__ : Dict =hidden_act
SCREAMING_SNAKE_CASE__ : str =rotary_pct
SCREAMING_SNAKE_CASE__ : Optional[Any] =rotary_emb_base
SCREAMING_SNAKE_CASE__ : List[Any] =attention_dropout
SCREAMING_SNAKE_CASE__ : List[Any] =hidden_dropout
SCREAMING_SNAKE_CASE__ : str =classifier_dropout
SCREAMING_SNAKE_CASE__ : Any =initializer_range
SCREAMING_SNAKE_CASE__ : Dict =layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any =use_cache
SCREAMING_SNAKE_CASE__ : Tuple =tie_word_embeddings
SCREAMING_SNAKE_CASE__ : Tuple =use_parallel_residual
SCREAMING_SNAKE_CASE__ : Union[str, Any] =rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
'''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
def __magic_name__ ( self : Optional[Any] ) -> Optional[Any]:
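# When set, `rope_scaling` must look like {'type': 'linear' | 'dynamic', 'factor': <float greater than 1.0>}.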
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __lowercase ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
F"got {self.rope_scaling}" )
SCREAMING_SNAKE_CASE__ : int =self.rope_scaling.get('''type''' , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =self.rope_scaling.get('''factor''' , __lowercase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(__lowercase , __lowercase ) or rope_scaling_factor <= 1.0:
raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 665 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =ArgumentParser('''Accelerate CLI tool''', usage='''accelerate <command> [<args>]''', allow_abbrev=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : str =parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=UpperCamelCase__ )
env_command_parser(subparsers=UpperCamelCase__ )
launch_command_parser(subparsers=UpperCamelCase__ )
tpu_command_parser(subparsers=UpperCamelCase__ )
test_command_parser(subparsers=UpperCamelCase__ )
# Let's go
SCREAMING_SNAKE_CASE__ : Dict =parser.parse_args()
if not hasattr(UpperCamelCase__, '''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(UpperCamelCase__ )
if __name__ == "__main__":
main()
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
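# The mapping below is resolved lazily: the framework-specific submodules are only
# imported when one of their attributes is first accessed.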
a_ = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
a_ = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _a( UpperCamelCase__ : NDArray[floataa], UpperCamelCase__ : NDArray[floataa], UpperCamelCase__ : list[int], UpperCamelCase__ : int, ):
'''simple docstring'''
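# Jacobi iteration: starting from the given initial values, repeatedly solve each
# equation for its diagonal unknown using the previous iterate for the other unknowns.
# Convergence is guaranteed when the coefficient matrix is strictly diagonally dominant.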
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =coefficient_matrix.shape
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =constant_matrix.shape
if rowsa != colsa:
SCREAMING_SNAKE_CASE__ : Any =f"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"
raise ValueError(UpperCamelCase__ )
if colsa != 1:
SCREAMING_SNAKE_CASE__ : str =f"Constant matrix must be nx1 but received {rowsa}x{colsa}"
raise ValueError(UpperCamelCase__ )
# compare the two row counts via the shapes directly; the unpacked pairs above shadow each other
if coefficient_matrix.shape[0] != constant_matrix.shape[0]:
SCREAMING_SNAKE_CASE__ : str =(
'''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
f"received {coefficient_matrix.shape[0]}x{coefficient_matrix.shape[1]} and {constant_matrix.shape[0]}x{constant_matrix.shape[1]}"
)
raise ValueError(UpperCamelCase__ )
if len(UpperCamelCase__ ) != rowsa:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(
'''Number of initial values must be equal to number of rows in coefficient '''
f"matrix but received {len(UpperCamelCase__ )} and {rowsa}"
)
raise ValueError(UpperCamelCase__ )
if iterations <= 0:
raise ValueError('''Iterations must be at least 1''' )
SCREAMING_SNAKE_CASE__ : NDArray[floataa] =np.concatenate(
(coefficient_matrix, constant_matrix), axis=1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =table.shape
strictly_diagonally_dominant(UpperCamelCase__ )
# Iterates the whole matrix for given number of times
for _ in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : List[str] =[]
for row in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =0
for col in range(UpperCamelCase__ ):
if col == row:
SCREAMING_SNAKE_CASE__ : int =table[row][col]
elif col == cols - 1:
SCREAMING_SNAKE_CASE__ : Any =table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
SCREAMING_SNAKE_CASE__ : int =(temp + val) / denom
new_val.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =new_val
return [float(UpperCamelCase__ ) for i in new_val]
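# Example (hypothetical values): the strictly diagonally dominant system
# A = [[4, 1, 1], [1, 5, 2], [1, 2, 4]], b = [[2], [-6], [-4]], x0 = [0.5, -0.5, -0.5]
# converges toward the exact solution of A @ x = b within a handful of iterations.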
def _a( UpperCamelCase__ : NDArray[floataa] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =table.shape
SCREAMING_SNAKE_CASE__ : Any =True
for i in range(0, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : int =0
for j in range(0, cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
'''simple docstring'''
def _a( UpperCamelCase__ : dict ):
'''simple docstring'''
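# Matching-based approximation: repeatedly take any remaining edge, put both of its
# endpoints into the cover, and discard every edge incident to either endpoint.
# The resulting cover is at most twice the size of an optimal vertex cover.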
SCREAMING_SNAKE_CASE__ : Any =set()
# edges = list of graph's edges
SCREAMING_SNAKE_CASE__ : str =get_edges(UpperCamelCase__ )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =edges.pop()
chosen_vertices.add(UpperCamelCase__ )
chosen_vertices.add(UpperCamelCase__ )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(UpperCamelCase__ )
return chosen_vertices
def _a( UpperCamelCase__ : dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] =set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 665 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _a( UpperCamelCase__ : str, UpperCamelCase__ : Tuple, UpperCamelCase__ : Any, UpperCamelCase__ : List[str], UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
with open(UpperCamelCase__ ) as metadata_file:
SCREAMING_SNAKE_CASE__ : Optional[int] =json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =LukeConfig(use_entity_aware_attention=UpperCamelCase__, **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.load(UpperCamelCase__, map_location='''cpu''' )['''module''']
# Load the entity vocab file
SCREAMING_SNAKE_CASE__ : List[str] =load_original_entity_vocab(UpperCamelCase__ )
# add an entry for [MASK2]
SCREAMING_SNAKE_CASE__ : Optional[int] =max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
SCREAMING_SNAKE_CASE__ : Optional[int] =XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE__ : List[Any] =AddedToken('''<ent>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any =AddedToken('''<ent2>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''r''' ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] =json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : int ='''MLukeTokenizer'''
with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''w''' ) as f:
json.dump(UpperCamelCase__, UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__, MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ), '''w''' ) as f:
json.dump(UpperCamelCase__, UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =MLukeTokenizer.from_pretrained(UpperCamelCase__ )
# Initialize the embeddings of the special tokens <ent> and <ent2> from those of '@' and '#'
SCREAMING_SNAKE_CASE__ : str =tokenizer.convert_tokens_to_ids(['''@'''] )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.convert_tokens_to_ids(['''#'''] )[0]
SCREAMING_SNAKE_CASE__ : Dict =state_dict['''embeddings.word_embeddings.weight''']
SCREAMING_SNAKE_CASE__ : List[str] =word_emb[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Tuple =word_emb[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : str =torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[bias_name]
SCREAMING_SNAKE_CASE__ : List[Any] =decoder_bias[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : str =decoder_bias[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : List[str] =torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE__ : Tuple =f"encoder.layer.{layer_index}.attention.self."
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : List[Any] =state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE__ : Any =state_dict['''entity_embeddings.entity_embeddings.weight''']
SCREAMING_SNAKE_CASE__ : Any =entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict['''entity_predictions.bias''']
SCREAMING_SNAKE_CASE__ : Tuple =entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_prediction_bias, entity_mask_bias] )
SCREAMING_SNAKE_CASE__ : int =LukeForMaskedLM(config=UpperCamelCase__ ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
SCREAMING_SNAKE_CASE__ : Tuple =OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[key]
else:
SCREAMING_SNAKE_CASE__ : Any =state_dict[key]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =model.load_state_dict(UpperCamelCase__, strict=UpperCamelCase__ )
if set(UpperCamelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" )
if set(UpperCamelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
SCREAMING_SNAKE_CASE__ : Any =MLukeTokenizer.from_pretrained(UpperCamelCase__, task='''entity_classification''' )
SCREAMING_SNAKE_CASE__ : str ='''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(0, 9)
SCREAMING_SNAKE_CASE__ : str =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : List[Any] =model(**UpperCamelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE__ : str =torch.Size((1, 3_3, 7_6_8) )
SCREAMING_SNAKE_CASE__ : int =torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE__ : Any =torch.Size((1, 1, 7_6_8) )
SCREAMING_SNAKE_CASE__ : int =torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
SCREAMING_SNAKE_CASE__ : str =MLukeTokenizer.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''Tokyo is the capital of <mask>.'''
SCREAMING_SNAKE_CASE__ : Dict =(2_4, 3_0)
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : List[str] =model(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] =encoding['''input_ids'''][0].tolist()
SCREAMING_SNAKE_CASE__ : Any =input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.entity_logits[0][0].argmax().item()
SCREAMING_SNAKE_CASE__ : Dict =[
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) )
model.save_pretrained(UpperCamelCase__ )
def _a( UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] =['''[MASK]''', '''[PAD]''', '''[UNK]''']
SCREAMING_SNAKE_CASE__ : List[str] =[json.loads(UpperCamelCase__ ) for line in open(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Optional[int] ={}
for entry in data:
SCREAMING_SNAKE_CASE__ : Tuple =entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
SCREAMING_SNAKE_CASE__ : str =entity_id
break
SCREAMING_SNAKE_CASE__ : Union[str, Any] =f"{language}:{entity_name}"
SCREAMING_SNAKE_CASE__ : Union[str, Any] =entity_id
return new_mapping
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 665 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """audio-spectrogram-transformer"""
def __init__( self : List[Any] , __lowercase : Tuple=7_68 , __lowercase : Union[str, Any]=12 , __lowercase : int=12 , __lowercase : Union[str, Any]=30_72 , __lowercase : List[str]="gelu" , __lowercase : Union[str, Any]=0.0 , __lowercase : int=0.0 , __lowercase : Optional[int]=0.02 , __lowercase : Union[str, Any]=1e-12 , __lowercase : Dict=16 , __lowercase : Tuple=True , __lowercase : Union[str, Any]=10 , __lowercase : Any=10 , __lowercase : str=10_24 , __lowercase : Dict=1_28 , **__lowercase : Dict , ) -> int:
super().__init__(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size
SCREAMING_SNAKE_CASE__ : Any =num_hidden_layers
SCREAMING_SNAKE_CASE__ : int =num_attention_heads
SCREAMING_SNAKE_CASE__ : Dict =intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[int] =hidden_act
SCREAMING_SNAKE_CASE__ : Dict =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : str =initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] =layer_norm_eps
SCREAMING_SNAKE_CASE__ : int =patch_size
SCREAMING_SNAKE_CASE__ : Any =qkv_bias
SCREAMING_SNAKE_CASE__ : Union[str, Any] =frequency_stride
SCREAMING_SNAKE_CASE__ : int =time_stride
SCREAMING_SNAKE_CASE__ : str =max_length
SCREAMING_SNAKE_CASE__ : List[Any] =num_mel_bins
| 665 |
'''simple docstring'''
def _a( UpperCamelCase__ : str, UpperCamelCase__ : str ):
'''simple docstring'''
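# dp[i][j] is True when the first i characters of the first string can be turned into
# the first j characters of the second by capitalising some of its lowercase letters
# and deleting the lowercase letters that remain.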
SCREAMING_SNAKE_CASE__ : Optional[int] =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple =[[False for _ in range(m + 1 )] for _ in range(n + 1 )]
SCREAMING_SNAKE_CASE__ : List[Any] =True
for i in range(UpperCamelCase__ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
SCREAMING_SNAKE_CASE__ : Optional[int] =True
if a[i].islower():
SCREAMING_SNAKE_CASE__ : List[Any] =True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , __lowercase : Optional[Any] , __lowercase : List[str]=13 , __lowercase : int=7 , __lowercase : Tuple=True , __lowercase : Optional[Any]=True , __lowercase : List[Any]=True , __lowercase : str=True , __lowercase : Tuple=99 , __lowercase : Union[str, Any]=64 , __lowercase : Tuple=32 , __lowercase : Optional[Any]=5 , __lowercase : Tuple=4 , __lowercase : Optional[int]=37 , __lowercase : int="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : List[Any]=0.1 , __lowercase : List[Any]=5_12 , __lowercase : int=16 , __lowercase : Optional[int]=2 , __lowercase : Tuple=0.02 , __lowercase : List[str]=3 , __lowercase : List[str]=4 , __lowercase : List[str]=None , ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =parent
SCREAMING_SNAKE_CASE__ : Any =batch_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =seq_length
SCREAMING_SNAKE_CASE__ : Dict =is_training
SCREAMING_SNAKE_CASE__ : List[Any] =use_input_mask
SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_token_type_ids
SCREAMING_SNAKE_CASE__ : List[Any] =use_labels
SCREAMING_SNAKE_CASE__ : int =vocab_size
SCREAMING_SNAKE_CASE__ : str =hidden_size
SCREAMING_SNAKE_CASE__ : Any =embedding_size
SCREAMING_SNAKE_CASE__ : Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE__ : str =num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : str =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Any =max_position_embeddings
SCREAMING_SNAKE_CASE__ : Optional[Any] =type_vocab_size
SCREAMING_SNAKE_CASE__ : Any =type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =initializer_range
SCREAMING_SNAKE_CASE__ : str =num_labels
SCREAMING_SNAKE_CASE__ : List[str] =num_choices
SCREAMING_SNAKE_CASE__ : List[str] =scope
def __magic_name__ ( self : str ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : int =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : int =None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =None
SCREAMING_SNAKE_CASE__ : Optional[Any] =None
SCREAMING_SNAKE_CASE__ : Optional[int] =None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Tuple =ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self : List[str] ) -> Any:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
def __magic_name__ ( self : Any , __lowercase : Tuple , __lowercase : Any , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : Dict , __lowercase : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertModel(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(__lowercase , token_type_ids=__lowercase )
SCREAMING_SNAKE_CASE__ : str =model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__ ( self : Dict , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : Any , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : str =MegatronBertForMaskedLM(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : List[str] , __lowercase : Tuple , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : str , __lowercase : Any , __lowercase : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =MegatronBertForNextSentencePrediction(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : List[str] , __lowercase : Optional[int] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[Any] , __lowercase : str , __lowercase : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForPreTraining(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , next_sentence_label=__lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : Optional[int] , __lowercase : Tuple , __lowercase : Optional[int] , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any , __lowercase : Any , __lowercase : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] =MegatronBertForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : int , __lowercase : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.num_labels
SCREAMING_SNAKE_CASE__ : Dict =MegatronBertForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : int ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Tuple =self.num_labels
SCREAMING_SNAKE_CASE__ : int =MegatronBertForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : Dict , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Any , __lowercase : List[Any] , __lowercase : int , __lowercase : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : int =self.num_choices
SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertForMultipleChoice(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[int] =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Tuple =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self : str ) -> Any:
SCREAMING_SNAKE_CASE__ : Tuple =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =config_and_inputs
SCREAMING_SNAKE_CASE__ : Union[str, Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
snake_case_ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = True
# test_resize_embeddings = False
snake_case_ = False
def __magic_name__ ( self : List[Any] , __lowercase : List[str] , __lowercase : Tuple , __lowercase : Tuple=False ) -> Union[str, Any]:
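# Models with a pretraining head additionally need dummy MLM labels (batch x seq_length)
# and next-sentence labels (batch) for the common tests; all-zero tensors suffice.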
SCREAMING_SNAKE_CASE__ : Optional[Any] =super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
if return_labels:
if model_class in get_values(__lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] =torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
return inputs_dict
def __magic_name__ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =MegatronBertModelTester(self )
SCREAMING_SNAKE_CASE__ : Tuple =ConfigTester(self , config_class=__lowercase , hidden_size=37 )
def __magic_name__ ( self : str ) -> Dict:
self.config_tester.run_common_tests()
def __magic_name__ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__lowercase )
def __magic_name__ ( self : List[str] ) -> Any:
SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__lowercase )
def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__lowercase )
def __magic_name__ ( self : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__lowercase )
def __magic_name__ ( self : Dict ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__lowercase )
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__lowercase )
def __magic_name__ ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__lowercase )
def _a( UpperCamelCase__ : List[str] ):
'''simple docstring'''
return torch.tensor(
UpperCamelCase__, dtype=torch.long, device=UpperCamelCase__, )
a_ = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
@unittest.skip('''Model is not available.''' )
def __magic_name__ ( self : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Any ='''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
SCREAMING_SNAKE_CASE__ : Optional[int] =os.path.join(os.environ['''MYDIR'''] , __lowercase )
SCREAMING_SNAKE_CASE__ : Any =MegatronBertModel.from_pretrained(__lowercase )
model.to(__lowercase )
model.half()
SCREAMING_SNAKE_CASE__ : Dict =_long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase )[0]
SCREAMING_SNAKE_CASE__ : Dict =torch.Size((1, 9, 10_24) )
self.assertEqual(output.shape , __lowercase )
SCREAMING_SNAKE_CASE__ : str =[-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
SCREAMING_SNAKE_CASE__ : List[Any] =output[0, ii, jj]
SCREAMING_SNAKE_CASE__ : Tuple =expected[3 * ii + jj]
SCREAMING_SNAKE_CASE__ : List[str] ='''ii={} jj={} a={} b={}'''.format(__lowercase , __lowercase , __lowercase , __lowercase )
self.assertTrue(math.isclose(__lowercase , __lowercase , rel_tol=__lowercase , abs_tol=__lowercase ) , msg=__lowercase )
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['YolosFeatureExtractor']
a_ = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE__ : Optional[Any] =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
SCREAMING_SNAKE_CASE__ : Dict =os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def __magic_name__ ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Any =F"\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n ".split()
SCREAMING_SNAKE_CASE__ : List[str] =[sys.executable] + distributed_args
execute_subprocess_async(__lowercase , env=os.environ.copy() )
| 665 | 1 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = ["""image_processor""", """tokenizer"""]
snake_case_ = """BlipImageProcessor"""
snake_case_ = """AutoTokenizer"""
def __init__( self : Union[str, Any] , __lowercase : List[Any] , __lowercase : Dict , __lowercase : Optional[int] ) -> Any:
super().__init__(__lowercase , __lowercase )
# add QFormer tokenizer
SCREAMING_SNAKE_CASE__ : Dict =qformer_tokenizer
def __call__( self : List[Any] , __lowercase : ImageInput = None , __lowercase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowercase : bool = True , __lowercase : Union[bool, str, PaddingStrategy] = False , __lowercase : Union[bool, str, TruncationStrategy] = None , __lowercase : Optional[int] = None , __lowercase : int = 0 , __lowercase : Optional[int] = None , __lowercase : Optional[bool] = None , __lowercase : bool = False , __lowercase : bool = False , __lowercase : bool = False , __lowercase : bool = False , __lowercase : bool = False , __lowercase : bool = True , __lowercase : Optional[Union[str, TensorType]] = None , **__lowercase : str , ) -> BatchFeature:
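# Text is encoded twice: once with the language-model tokenizer and once with the
# QFormer tokenizer, whose input ids and attention mask are pulled out below so they
# can be stored under their own QFormer-specific keys in the batch.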
if images is None and text is None:
raise ValueError('''You have to specify at least images or text.''' )
SCREAMING_SNAKE_CASE__ : Any =BatchFeature()
if text is not None:
SCREAMING_SNAKE_CASE__ : List[str] =self.tokenizer(
text=__lowercase , add_special_tokens=__lowercase , padding=__lowercase , truncation=__lowercase , max_length=__lowercase , stride=__lowercase , pad_to_multiple_of=__lowercase , return_attention_mask=__lowercase , return_overflowing_tokens=__lowercase , return_special_tokens_mask=__lowercase , return_offsets_mapping=__lowercase , return_token_type_ids=__lowercase , return_length=__lowercase , verbose=__lowercase , return_tensors=__lowercase , **__lowercase , )
encoding.update(__lowercase )
SCREAMING_SNAKE_CASE__ : int =self.qformer_tokenizer(
text=__lowercase , add_special_tokens=__lowercase , padding=__lowercase , truncation=__lowercase , max_length=__lowercase , stride=__lowercase , pad_to_multiple_of=__lowercase , return_attention_mask=__lowercase , return_overflowing_tokens=__lowercase , return_special_tokens_mask=__lowercase , return_offsets_mapping=__lowercase , return_token_type_ids=__lowercase , return_length=__lowercase , verbose=__lowercase , return_tensors=__lowercase , **__lowercase , )
SCREAMING_SNAKE_CASE__ : List[Any] =qformer_text_encoding.pop('''input_ids''' )
SCREAMING_SNAKE_CASE__ : str =qformer_text_encoding.pop('''attention_mask''' )
if images is not None:
SCREAMING_SNAKE_CASE__ : Tuple =self.image_processor(__lowercase , return_tensors=__lowercase )
encoding.update(__lowercase )
return encoding
def __magic_name__ ( self : Any , *__lowercase : Tuple , **__lowercase : Optional[int] ) -> Optional[int]:
return self.tokenizer.batch_decode(*__lowercase , **__lowercase )
def __magic_name__ ( self : Union[str, Any] , *__lowercase : str , **__lowercase : List[str] ) -> Optional[int]:
return self.tokenizer.decode(*__lowercase , **__lowercase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __magic_name__ ( self : Dict ) -> Dict:
SCREAMING_SNAKE_CASE__ : str =self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Tuple =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def __magic_name__ ( self : List[Any] , __lowercase : Optional[Any] , **__lowercase : Optional[Any] ) -> Any:
if os.path.isfile(__lowercase ):
raise ValueError(F"Provided path ({save_directory}) should be a directory, not a file" )
os.makedirs(__lowercase , exist_ok=__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =os.path.join(__lowercase , '''qformer_tokenizer''' )
self.qformer_tokenizer.save_pretrained(__lowercase )
return super().save_pretrained(__lowercase , **__lowercase )
@classmethod
def __magic_name__ ( cls : List[Any] , __lowercase : Dict , **__lowercase : int ) -> str:
SCREAMING_SNAKE_CASE__ : List[Any] =AutoTokenizer.from_pretrained(__lowercase , subfolder='''qformer_tokenizer''' )
SCREAMING_SNAKE_CASE__ : Tuple =cls._get_arguments_from_pretrained(__lowercase , **__lowercase )
args.append(__lowercase )
return cls(*__lowercase )
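# Hedged usage sketch (not part of the original file; the checkpoint and image
# names are illustrative placeholders):
#
#   from PIL import Image
#   processor = SomeProcessor.from_pretrained("some/instructblip-style-checkpoint")
#   inputs = processor(images=Image.open("photo.jpg"),
#                      text="Describe the image.", return_tensors="pt")
#
# `inputs` then carries pixel_values from the image processor plus the main
# tokenizer's input_ids/attention_mask; the QFormer tokenizer's ids are popped off
# separately in __call__ above, and save_pretrained/from_pretrained round-trip the
# second tokenizer through the `qformer_tokenizer` subfolder shown above.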
| 665 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = ShapEImgaImgPipeline
snake_case_ = ["""image"""]
snake_case_ = ["""image"""]
snake_case_ = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
snake_case_ = False
@property
def __magic_name__ ( self : List[Any] ) -> List[Any]:
return 32
@property
def __magic_name__ ( self : List[str] ) -> Optional[int]:
return 32
@property
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Dict ) -> Union[str, Any]:
return 8
@property
def __magic_name__ ( self : Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
SCREAMING_SNAKE_CASE__ : str =CLIPVisionModel(__lowercase )
return model
@property
def __magic_name__ ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ : int =CLIPImageProcessor(
crop_size=2_24 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , )
return image_processor
@property
def __magic_name__ ( self : List[str] ) -> Dict:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : str ={
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
SCREAMING_SNAKE_CASE__ : str =PriorTransformer(**__lowercase )
return model
@property
def __magic_name__ ( self : Tuple ) -> List[str]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE__ : str =ShapERenderer(**__lowercase )
return model
def __magic_name__ ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int =self.dummy_prior
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_encoder
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_processor
SCREAMING_SNAKE_CASE__ : Tuple =self.dummy_renderer
SCREAMING_SNAKE_CASE__ : int =HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=__lowercase , clip_sample=__lowercase , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE__ : Any ={
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __magic_name__ ( self : Any , __lowercase : List[str] , __lowercase : Any=0 ) -> Any:
SCREAMING_SNAKE_CASE__ : int =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
if str(__lowercase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ : List[str] =torch.manual_seed(__lowercase )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
SCREAMING_SNAKE_CASE__ : Any ={
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def __magic_name__ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ : int ='''cpu'''
SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : str =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Any =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =output.images[0]
SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : List[Any] =np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self : List[Any] ) -> List[str]:
        # NOTE: Larger batch sizes cause this test to time out, so only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __magic_name__ ( self : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch_device == '''cpu'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__lowercase , relax_max_difference=__lowercase , )
def __magic_name__ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =1
SCREAMING_SNAKE_CASE__ : List[str] =2
SCREAMING_SNAKE_CASE__ : Dict =self.get_dummy_inputs(__lowercase )
for key in inputs.keys():
if key in self.batch_params:
SCREAMING_SNAKE_CASE__ : Tuple =batch_size * [inputs[key]]
SCREAMING_SNAKE_CASE__ : List[Any] =pipe(**__lowercase , num_images_per_prompt=__lowercase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : Optional[Any] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : List[str] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
SCREAMING_SNAKE_CASE__ : Dict =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
SCREAMING_SNAKE_CASE__ : List[Any] =ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
SCREAMING_SNAKE_CASE__ : Tuple =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple =pipe(
__lowercase , generator=__lowercase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__lowercase , __lowercase )
| 665 | 1 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , __lowercase : Optional[Any] , __lowercase : List[str]=13 , __lowercase : int=7 , __lowercase : Tuple=True , __lowercase : Optional[Any]=True , __lowercase : List[Any]=True , __lowercase : str=True , __lowercase : Tuple=99 , __lowercase : Union[str, Any]=64 , __lowercase : Tuple=32 , __lowercase : Optional[Any]=5 , __lowercase : Tuple=4 , __lowercase : Optional[int]=37 , __lowercase : int="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : List[Any]=0.1 , __lowercase : List[Any]=5_12 , __lowercase : int=16 , __lowercase : Optional[int]=2 , __lowercase : Tuple=0.02 , __lowercase : List[str]=3 , __lowercase : List[str]=4 , __lowercase : List[str]=None , ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =parent
SCREAMING_SNAKE_CASE__ : Any =batch_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =seq_length
SCREAMING_SNAKE_CASE__ : Dict =is_training
SCREAMING_SNAKE_CASE__ : List[Any] =use_input_mask
SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_token_type_ids
SCREAMING_SNAKE_CASE__ : List[Any] =use_labels
SCREAMING_SNAKE_CASE__ : int =vocab_size
SCREAMING_SNAKE_CASE__ : str =hidden_size
SCREAMING_SNAKE_CASE__ : Any =embedding_size
SCREAMING_SNAKE_CASE__ : Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE__ : str =num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : str =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Any =max_position_embeddings
SCREAMING_SNAKE_CASE__ : Optional[Any] =type_vocab_size
SCREAMING_SNAKE_CASE__ : Any =type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =initializer_range
SCREAMING_SNAKE_CASE__ : str =num_labels
SCREAMING_SNAKE_CASE__ : List[str] =num_choices
SCREAMING_SNAKE_CASE__ : List[str] =scope
def __magic_name__ ( self : str ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : int =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : int =None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =None
SCREAMING_SNAKE_CASE__ : Optional[Any] =None
SCREAMING_SNAKE_CASE__ : Optional[int] =None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Tuple =ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self : List[str] ) -> Any:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
def __magic_name__ ( self : Any , __lowercase : Tuple , __lowercase : Any , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : Dict , __lowercase : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertModel(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(__lowercase , token_type_ids=__lowercase )
SCREAMING_SNAKE_CASE__ : str =model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__ ( self : Dict , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : Any , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : str =MegatronBertForMaskedLM(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : List[str] , __lowercase : Tuple , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : str , __lowercase : Any , __lowercase : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =MegatronBertForNextSentencePrediction(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : List[str] , __lowercase : Optional[int] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[Any] , __lowercase : str , __lowercase : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForPreTraining(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , next_sentence_label=__lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : Optional[int] , __lowercase : Tuple , __lowercase : Optional[int] , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any , __lowercase : Any , __lowercase : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] =MegatronBertForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : int , __lowercase : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.num_labels
SCREAMING_SNAKE_CASE__ : Dict =MegatronBertForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : int ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Tuple =self.num_labels
SCREAMING_SNAKE_CASE__ : int =MegatronBertForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : Dict , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Any , __lowercase : List[Any] , __lowercase : int , __lowercase : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : int =self.num_choices
SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertForMultipleChoice(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[int] =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Tuple =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self : str ) -> Any:
SCREAMING_SNAKE_CASE__ : Tuple =self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =config_and_inputs
SCREAMING_SNAKE_CASE__ : Union[str, Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
snake_case_ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = True
# test_resize_embeddings = False
snake_case_ = False
def __magic_name__ ( self : List[Any] , __lowercase : List[str] , __lowercase : Tuple , __lowercase : Tuple=False ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
if return_labels:
if model_class in get_values(__lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] =torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
return inputs_dict
def __magic_name__ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =MegatronBertModelTester(self )
SCREAMING_SNAKE_CASE__ : Tuple =ConfigTester(self , config_class=__lowercase , hidden_size=37 )
def __magic_name__ ( self : str ) -> Dict:
self.config_tester.run_common_tests()
def __magic_name__ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__lowercase )
def __magic_name__ ( self : List[str] ) -> Any:
SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__lowercase )
def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__lowercase )
def __magic_name__ ( self : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__lowercase )
def __magic_name__ ( self : Dict ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__lowercase )
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__lowercase )
def __magic_name__ ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__lowercase )
def _a( UpperCamelCase__ : List[str] ):
'''simple docstring'''
return torch.tensor(
UpperCamelCase__, dtype=torch.long, device=UpperCamelCase__, )
a_ = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
@unittest.skip('''Model is not available.''' )
def __magic_name__ ( self : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Any ='''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
SCREAMING_SNAKE_CASE__ : Optional[int] =os.path.join(os.environ['''MYDIR'''] , __lowercase )
SCREAMING_SNAKE_CASE__ : Any =MegatronBertModel.from_pretrained(__lowercase )
model.to(__lowercase )
model.half()
SCREAMING_SNAKE_CASE__ : Dict =_long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase )[0]
SCREAMING_SNAKE_CASE__ : Dict =torch.Size((1, 9, 10_24) )
self.assertEqual(output.shape , __lowercase )
SCREAMING_SNAKE_CASE__ : str =[-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
SCREAMING_SNAKE_CASE__ : List[Any] =output[0, ii, jj]
SCREAMING_SNAKE_CASE__ : Tuple =expected[3 * ii + jj]
SCREAMING_SNAKE_CASE__ : List[str] ='''ii={} jj={} a={} b={}'''.format(__lowercase , __lowercase , __lowercase , __lowercase )
self.assertTrue(math.isclose(__lowercase , __lowercase , rel_tol=__lowercase , abs_tol=__lowercase ) , msg=__lowercase )
| 665 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """gpt_bigcode"""
snake_case_ = ["""past_key_values"""]
snake_case_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Any , __lowercase : Any=5_02_57 , __lowercase : int=10_24 , __lowercase : List[str]=7_68 , __lowercase : Optional[int]=12 , __lowercase : Dict=12 , __lowercase : List[str]=None , __lowercase : int="gelu_pytorch_tanh" , __lowercase : Union[str, Any]=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Optional[Any]=1e-5 , __lowercase : List[str]=0.02 , __lowercase : Tuple=True , __lowercase : Optional[Any]=True , __lowercase : Union[str, Any]=5_02_56 , __lowercase : List[Any]=5_02_56 , __lowercase : Union[str, Any]=True , __lowercase : List[str]=True , __lowercase : Dict=True , **__lowercase : List[Any] , ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =n_positions
SCREAMING_SNAKE_CASE__ : Dict =n_embd
SCREAMING_SNAKE_CASE__ : Dict =n_layer
SCREAMING_SNAKE_CASE__ : Union[str, Any] =n_head
SCREAMING_SNAKE_CASE__ : List[str] =n_inner
SCREAMING_SNAKE_CASE__ : List[str] =activation_function
SCREAMING_SNAKE_CASE__ : List[Any] =resid_pdrop
SCREAMING_SNAKE_CASE__ : List[Any] =embd_pdrop
SCREAMING_SNAKE_CASE__ : List[str] =attn_pdrop
SCREAMING_SNAKE_CASE__ : Dict =layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : List[str] =initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] =scale_attn_weights
SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_cache
SCREAMING_SNAKE_CASE__ : Dict =attention_softmax_in_fpaa
SCREAMING_SNAKE_CASE__ : int =scale_attention_softmax_in_fpaa
SCREAMING_SNAKE_CASE__ : Dict =multi_query
SCREAMING_SNAKE_CASE__ : Optional[Any] =bos_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] =eos_token_id
super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
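# Hedged note (not part of the original file): in the upstream PretrainedConfig the
# alias dict above is assigned to `attribute_map`, so the common names resolve to
# the GPT-2-style fields, e.g. (GPTBigCodeConfig is the illustrative upstream name):
#
#   config = GPTBigCodeConfig()
#   config.hidden_size == config.n_embd          # True, via attribute_map
#   config.num_hidden_layers == config.n_layer   # True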
| 665 | 1 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order-specific notes:
# - tqdm must be checked before tokenizers
a_ = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
            # Maybe switch to is_torch_available in the future here so that Accelerate is a hard
            # dependency of Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def _a( UpperCamelCase__ : Any, UpperCamelCase__ : List[Any]=None ):
'''simple docstring'''
require_version(deps[pkg], UpperCamelCase__ )
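# Hedged example (not part of the original file): require_version accepts a
# pip-style requirement plus an optional hint that is appended to the error, e.g.
#
#   require_version("tokenizers>=0.11.1", "pip install -U tokenizers")
#
# It raises if the installed version fails the specifier and returns silently
# otherwise; the wrapper above just looks the pinned specifier up in `deps` by
# package name.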
| 665 |
'''simple docstring'''
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , __lowercase : int ) -> None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =size
SCREAMING_SNAKE_CASE__ : List[Any] =[0] * size
SCREAMING_SNAKE_CASE__ : str =[0] * size
@staticmethod
def __magic_name__ ( __lowercase : int ) -> int:
return index | (index + 1)
@staticmethod
def __magic_name__ ( __lowercase : int ) -> int:
return (index & (index + 1)) - 1
def __magic_name__ ( self : Dict , __lowercase : int , __lowercase : int ) -> None:
SCREAMING_SNAKE_CASE__ : List[str] =value
while index < self.size:
SCREAMING_SNAKE_CASE__ : Any =self.get_prev(__lowercase ) + 1
if current_left_border == index:
SCREAMING_SNAKE_CASE__ : List[str] =value
else:
SCREAMING_SNAKE_CASE__ : str =max(__lowercase , __lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.get_next(__lowercase )
def __magic_name__ ( self : Optional[int] , __lowercase : int , __lowercase : int ) -> int:
        right -= 1  # Because right is exclusive
SCREAMING_SNAKE_CASE__ : str =0
while left <= right:
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.get_prev(__lowercase )
if left <= current_left:
SCREAMING_SNAKE_CASE__ : List[Any] =max(__lowercase , self.tree[right] )
SCREAMING_SNAKE_CASE__ : Any =current_left
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =max(__lowercase , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
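# Hedged illustration (not part of the original file) of the two bit-trick index
# helpers defined above:
#
#   get_next(5) == 5 | 6 == 7        # 0b101 -> 0b111: next node whose block covers index 5
#   get_prev(5) == (5 & 6) - 1 == 3  # 0b101 -> 0b011: last index of the preceding block
#
# The query method walks right towards left with get_prev: when a whole
# precomputed block fits inside [left, right] it takes tree[right] and jumps,
# otherwise it takes arr[right] and steps back one element.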
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = ["""vqvae"""]
def __init__( self : int , __lowercase : AutoencoderKL , __lowercase : UNetaDConditionModel , __lowercase : Mel , __lowercase : Union[DDIMScheduler, DDPMScheduler] , ) -> int:
super().__init__()
self.register_modules(unet=__lowercase , scheduler=__lowercase , mel=__lowercase , vqvae=__lowercase )
def __magic_name__ ( self : List[str] ) -> int:
return 50 if isinstance(self.scheduler , __lowercase ) else 10_00
@torch.no_grad()
def __call__( self : Dict , __lowercase : int = 1 , __lowercase : str = None , __lowercase : np.ndarray = None , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = None , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : float = 0 , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : torch.Tensor = None , __lowercase : torch.Tensor = None , __lowercase : Dict=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =steps or self.get_default_steps()
self.scheduler.set_timesteps(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
SCREAMING_SNAKE_CASE__ : Optional[int] =randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__lowercase , device=self.device , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =noise
SCREAMING_SNAKE_CASE__ : List[str] =None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Dict =self.mel.audio_slice_to_image(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
SCREAMING_SNAKE_CASE__ : int =(input_image / 2_55) * 2 - 1
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.vqvae.encode(torch.unsqueeze(__lowercase , 0 ) ).latent_dist.sample(
generator=__lowercase )[0]
SCREAMING_SNAKE_CASE__ : int =self.vqvae.config.scaling_factor * input_images
if start_step > 0:
SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , self.scheduler.timesteps[start_step - 1] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
SCREAMING_SNAKE_CASE__ : Optional[Any] =int(mask_start_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : Tuple =int(mask_end_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __lowercase ):
SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase , __lowercase )['''sample''']
else:
SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase )['''sample''']
if isinstance(self.scheduler , __lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.scheduler.step(
model_output=__lowercase , timestep=__lowercase , sample=__lowercase , eta=__lowercase , generator=__lowercase , )['''prev_sample''']
else:
SCREAMING_SNAKE_CASE__ : Any =self.scheduler.step(
model_output=__lowercase , timestep=__lowercase , sample=__lowercase , generator=__lowercase , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] =mask[:, step, :, :mask_start]
if mask_end > 0:
SCREAMING_SNAKE_CASE__ : List[str] =mask[:, step, :, -mask_end:]
if self.vqvae is not None:
            # 0.18215 was the scaling factor used in training to ensure unit variance
SCREAMING_SNAKE_CASE__ : str =1 / self.vqvae.config.scaling_factor * images
SCREAMING_SNAKE_CASE__ : int =self.vqvae.decode(__lowercase )['''sample''']
SCREAMING_SNAKE_CASE__ : List[str] =(images / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
SCREAMING_SNAKE_CASE__ : Dict =(images * 2_55).round().astype('''uint8''' )
SCREAMING_SNAKE_CASE__ : Any =list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ , mode='''RGB''' ).convert('''L''' ) for _ in images) )
SCREAMING_SNAKE_CASE__ : Optional[int] =[self.mel.image_to_audio(__lowercase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowercase ) )
@torch.no_grad()
def __magic_name__ ( self : Optional[int] , __lowercase : List[Image.Image] , __lowercase : int = 50 ) -> np.ndarray:
assert isinstance(self.scheduler , __lowercase )
self.scheduler.set_timesteps(__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
SCREAMING_SNAKE_CASE__ : str =(sample / 2_55) * 2 - 1
SCREAMING_SNAKE_CASE__ : str =torch.Tensor(__lowercase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
SCREAMING_SNAKE_CASE__ : Tuple =t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
SCREAMING_SNAKE_CASE__ : Any =self.scheduler.alphas_cumprod[t]
SCREAMING_SNAKE_CASE__ : int =(
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
SCREAMING_SNAKE_CASE__ : int =1 - alpha_prod_t
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.unet(__lowercase , __lowercase )['''sample''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(1 - alpha_prod_t_prev) ** 0.5 * model_output
SCREAMING_SNAKE_CASE__ : str =(sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
SCREAMING_SNAKE_CASE__ : Any =sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __magic_name__ ( __lowercase : torch.Tensor , __lowercase : torch.Tensor , __lowercase : float ) -> torch.Tensor:
SCREAMING_SNAKE_CASE__ : Optional[int] =acos(torch.dot(torch.flatten(__lowercase ) , torch.flatten(__lowercase ) ) / torch.norm(__lowercase ) / torch.norm(__lowercase ) )
return sin((1 - alpha) * theta ) * xa / sin(__lowercase ) + sin(alpha * theta ) * xa / sin(__lowercase )
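# Hedged numeric check (not part of the original file) of the static method above,
# reading its obfuscated arguments as standard slerp: for orthogonal unit vectors
# theta = pi / 2, so the alpha = 0.5 interpolant is (x0 + x1) / sqrt(2). The names
# below are illustrative, not from the pipeline.
import math as _math

_x0, _x1 = (1.0, 0.0), (0.0, 1.0)    # orthogonal unit vectors
_alpha, _theta = 0.5, _math.pi / 2   # acos(dot) = pi/2 for orthogonal inputs
_c0 = _math.sin((1 - _alpha) * _theta) / _math.sin(_theta)
_c1 = _math.sin(_alpha * _theta) / _math.sin(_theta)
_mid = (_c0 * _x0[0] + _c1 * _x1[0], _c0 * _x0[1] + _c1 * _x1[1])
assert abs(_math.hypot(*_mid) - 1.0) < 1e-12  # the interpolant stays on the unit sphere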
| 665 | 1 |
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ = 1_6
a_ = 3_2
def _a( UpperCamelCase__ : Accelerator, UpperCamelCase__ : int = 1_6 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] =AutoTokenizer.from_pretrained('''bert-base-cased''' )
SCREAMING_SNAKE_CASE__ : int =load_dataset('''glue''', '''mrpc''' )
def tokenize_function(UpperCamelCase__ : Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=UpperCamelCase__, max_length=UpperCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE__ : Dict =datasets.map(
UpperCamelCase__, batched=UpperCamelCase__, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE__ : List[Any] =tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(UpperCamelCase__ : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE__ : str =1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want the padded length to be a round multiple of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE__ : str =1_6
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE__ : Optional[Any] =8
else:
SCREAMING_SNAKE_CASE__ : Dict =None
return tokenizer.pad(
UpperCamelCase__, padding='''longest''', max_length=UpperCamelCase__, pad_to_multiple_of=UpperCamelCase__, return_tensors='''pt''', )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ : Optional[int] =DataLoader(
tokenized_datasets['''train'''], shuffle=UpperCamelCase__, collate_fn=UpperCamelCase__, batch_size=UpperCamelCase__, drop_last=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] =DataLoader(
tokenized_datasets['''validation'''], shuffle=UpperCamelCase__, collate_fn=UpperCamelCase__, batch_size=UpperCamelCase__, drop_last=(accelerator.mixed_precision == '''fp8'''), )
return train_dataloader, eval_dataloader
def _a( UpperCamelCase__ : int, UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE__ : int =config['''lr''']
SCREAMING_SNAKE_CASE__ : Optional[int] =int(config['''num_epochs'''] )
SCREAMING_SNAKE_CASE__ : Any =int(config['''seed'''] )
SCREAMING_SNAKE_CASE__ : Any =int(config['''batch_size'''] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =evaluate.load('''glue''', '''mrpc''' )
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE__ : str =1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE__ : str =batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE__ : Dict =MAX_GPU_BATCH_SIZE
set_seed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =get_dataloaders(UpperCamelCase__, UpperCamelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE__ : Any =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=UpperCamelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE__ : List[str] =model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE__ : List[str] =AdamW(params=model.parameters(), lr=UpperCamelCase__ )
# Instantiate scheduler
SCREAMING_SNAKE_CASE__ : int =get_linear_schedule_with_warmup(
optimizer=UpperCamelCase__, num_warmup_steps=1_0_0, num_training_steps=(len(UpperCamelCase__ ) * num_epochs) // gradient_accumulation_steps, )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =accelerator.prepare(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
# Now we train the model
for epoch in range(UpperCamelCase__ ):
model.train()
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE__ : List[Any] =model(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : int =outputs.loss
SCREAMING_SNAKE_CASE__ : Union[str, Any] =loss / gradient_accumulation_steps
accelerator.backward(UpperCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] =model(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=UpperCamelCase__, references=UpperCamelCase__, )
SCREAMING_SNAKE_CASE__ : int =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", UpperCamelCase__ )
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] =argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''', type=UpperCamelCase__, default=UpperCamelCase__, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''', )
parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] =parser.parse_args()
SCREAMING_SNAKE_CASE__ : Tuple ={'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6}
training_function(UpperCamelCase__, UpperCamelCase__ )
if __name__ == "__main__":
main()
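# Hedged usage note (not part of the original file; the file name is a
# placeholder): a script structured like this is typically started either
# directly for a single process,
#
#   python ./this_script.py --mixed_precision fp16
#
# or through the Accelerate launcher, which sets up the distributed environment
# described in the header comment:
#
#   accelerate launch ./this_script.py --mixed_precision fp16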
| 665 |
'''simple docstring'''
from math import isqrt
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =[True] * max_number
for i in range(2, isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2, UpperCamelCase__, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Any =False
return [i for i in range(2, UpperCamelCase__ ) if is_prime[i]]
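# Hedged check (not part of the original file): the sieve above returns every
# prime strictly below its argument, e.g. a limit of 10 yields [2, 3, 5, 7].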
def _a( UpperCamelCase__ : int = 1_0**8 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple =calculate_prime_numbers(max_number // 2 )
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : Optional[int] =len(UpperCamelCase__ ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
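# Hedged worked example (not part of the original file) of the two-pointer sweep
# above: for max_number = 30 the primes below 15 are [2, 3, 5, 7, 11, 13]; the
# sweep counts 6 products with smallest factor 2, then 3 with factor 3, then 1
# (5 * 5 = 25), i.e. 10 in total -- matching the semiprimes below 30:
# 4, 6, 9, 10, 14, 15, 21, 22, 25, 26.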
| 665 | 1 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a_ = logging.get_logger(__name__)
def _a( UpperCamelCase__ : List[Any], UpperCamelCase__ : Optional[int], UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
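# Hedged worked example (not part of the original file): on a 200 x 100 pixel page
# the box [10, 20, 30, 40] maps onto the 0-1000 grid LayoutLM-style models expect:
#
#   [int(1000 * 10 / 200), int(1000 * 20 / 100),
#    int(1000 * 30 / 200), int(1000 * 40 / 100)] == [50, 200, 150, 400]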
def _a( UpperCamelCase__ : np.ndarray, UpperCamelCase__ : Optional[str], UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str =tesseract_config if tesseract_config is not None else ''''''
# apply OCR
SCREAMING_SNAKE_CASE__ : Tuple =to_pil_image(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =pil_image.size
SCREAMING_SNAKE_CASE__ : str =pytesseract.image_to_data(UpperCamelCase__, lang=UpperCamelCase__, output_type='''dict''', config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
SCREAMING_SNAKE_CASE__ : Tuple =[idx for idx, word in enumerate(UpperCamelCase__ ) if not word.strip()]
SCREAMING_SNAKE_CASE__ : Any =[word for idx, word in enumerate(UpperCamelCase__ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : Optional[Any] =[coord for idx, coord in enumerate(UpperCamelCase__ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[coord for idx, coord in enumerate(UpperCamelCase__ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : int =[coord for idx, coord in enumerate(UpperCamelCase__ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : str =[coord for idx, coord in enumerate(UpperCamelCase__ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[]
for x, y, w, h in zip(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Dict =[x, y, x + w, y + h]
actual_boxes.append(UpperCamelCase__ )
# finally, normalize the bounding boxes
SCREAMING_SNAKE_CASE__ : Optional[int] =[]
for box in actual_boxes:
normalized_boxes.append(normalize_box(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) )
assert len(UpperCamelCase__ ) == len(UpperCamelCase__ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = ["""pixel_values"""]
def __init__( self : Any , __lowercase : bool = True , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = PILImageResampling.BILINEAR , __lowercase : bool = True , __lowercase : Optional[str] = None , __lowercase : Optional[str] = "" , **__lowercase : List[str] , ) -> None:
super().__init__(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =size if size is not None else {'''height''': 2_24, '''width''': 2_24}
SCREAMING_SNAKE_CASE__ : Optional[int] =get_size_dict(__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =do_resize
SCREAMING_SNAKE_CASE__ : Any =size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =resample
SCREAMING_SNAKE_CASE__ : Optional[int] =apply_ocr
SCREAMING_SNAKE_CASE__ : List[str] =ocr_lang
SCREAMING_SNAKE_CASE__ : Optional[int] =tesseract_config
def __magic_name__ ( self : List[str] , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : PILImageResampling = PILImageResampling.BILINEAR , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Tuple , ) -> np.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] =get_size_dict(__lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
SCREAMING_SNAKE_CASE__ : List[Any] =(size['''height'''], size['''width'''])
return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
def __magic_name__ ( self : Tuple , __lowercase : ImageInput , __lowercase : bool = None , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = None , __lowercase : bool = None , __lowercase : Optional[str] = None , __lowercase : Optional[str] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : ChannelDimension = ChannelDimension.FIRST , **__lowercase : str , ) -> PIL.Image.Image:
SCREAMING_SNAKE_CASE__ : Any =do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : List[Any] =size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Optional[int] =get_size_dict(__lowercase )
SCREAMING_SNAKE_CASE__ : int =resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : List[str] =apply_ocr if apply_ocr is not None else self.apply_ocr
SCREAMING_SNAKE_CASE__ : int =ocr_lang if ocr_lang is not None else self.ocr_lang
SCREAMING_SNAKE_CASE__ : Optional[int] =tesseract_config if tesseract_config is not None else self.tesseract_config
SCREAMING_SNAKE_CASE__ : List[str] =make_list_of_images(__lowercase )
if not valid_images(__lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : int =[to_numpy_array(__lowercase ) for image in images]
if apply_ocr:
requires_backends(self , '''pytesseract''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[]
SCREAMING_SNAKE_CASE__ : Optional[int] =[]
for image in images:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =apply_tesseract(__lowercase , __lowercase , __lowercase )
words_batch.append(__lowercase )
boxes_batch.append(__lowercase )
if do_resize:
SCREAMING_SNAKE_CASE__ : int =[self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
SCREAMING_SNAKE_CASE__ : Optional[int] =[flip_channel_order(__lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : Tuple =[to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : int =BatchFeature(data={'''pixel_values''': images} , tensor_type=__lowercase )
if apply_ocr:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =words_batch
SCREAMING_SNAKE_CASE__ : Optional[int] =boxes_batch
return data
| 665 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = SpeechTaTokenizer
snake_case_ = False
snake_case_ = True
def __magic_name__ ( self : int ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ : Optional[Any] =SpeechTaTokenizer(__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =AddedToken('''<mask>''' , lstrip=__lowercase , rstrip=__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self : Dict , __lowercase : int ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] ='''this is a test'''
SCREAMING_SNAKE_CASE__ : int ='''this is a test'''
return input_text, output_text
def __magic_name__ ( self : List[Any] , __lowercase : int , __lowercase : Optional[Any]=False , __lowercase : Union[str, Any]=20 , __lowercase : Any=5 ) -> Any:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.get_input_output_texts(__lowercase )
SCREAMING_SNAKE_CASE__ : str =tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
SCREAMING_SNAKE_CASE__ : str =tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
return text, ids
def __magic_name__ ( self : Dict ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[int] ='''<pad>'''
SCREAMING_SNAKE_CASE__ : Optional[int] =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def __magic_name__ ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-4] , '''œ''' )
self.assertEqual(vocab_keys[-2] , '''<mask>''' )
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
self.assertEqual(len(__lowercase ) , 81 )
def __magic_name__ ( self : Dict ) -> List[str]:
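        # vocab_size covers the underlying SentencePiece model only (79 entries); the two
        # tokens added in setUp (<mask> and <ctc_blank>) account for the 81 keys above.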
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def __magic_name__ ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE__ : str =self.get_tokenizers(do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Any =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
SCREAMING_SNAKE_CASE__ : int =['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.add_tokens(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , len(__lowercase ) )
self.assertEqual(__lowercase , all_size + len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__lowercase )
self.assertGreaterEqual(len(__lowercase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
SCREAMING_SNAKE_CASE__ : str ={'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
SCREAMING_SNAKE_CASE__ : int =tokenizer.add_special_tokens(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : int =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , len(__lowercase ) )
self.assertEqual(__lowercase , all_size_a + len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__lowercase )
self.assertGreaterEqual(len(__lowercase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def __magic_name__ ( self : Optional[Any] ) -> Any:
pass
def __magic_name__ ( self : List[str] ) -> List[Any]:
pass
def __magic_name__ ( self : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Dict =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.tokenize('''This is a test''' )
# fmt: off
self.assertListEqual(__lowercase , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(__lowercase )
# fmt: off
self.assertListEqual(__lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.convert_ids_to_tokens(__lowercase )
self.assertListEqual(
__lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
def __magic_name__ ( self : List[str] ) -> List[str]:
# Use custom sequence because this tokenizer does not handle numbers.
SCREAMING_SNAKE_CASE__ : List[Any] =[
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
SCREAMING_SNAKE_CASE__ : str ={
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__lowercase , )
| 665 | 1 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """"""
snake_case_ = """hf-legacy""" # "hf://"" is reserved for hffs
def __init__( self : Union[str, Any] , __lowercase : Optional[DatasetInfo] = None , __lowercase : Optional[str] = None , **__lowercase : List[Any] , ) -> Union[str, Any]:
super().__init__(self , **__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =repo_info
SCREAMING_SNAKE_CASE__ : List[str] =token
SCREAMING_SNAKE_CASE__ : Any =None
def __magic_name__ ( self : Tuple ) -> str:
if self.dir_cache is None:
SCREAMING_SNAKE_CASE__ : Dict ={}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
SCREAMING_SNAKE_CASE__ : Any ={
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(__lowercase ): {'''name''': str(__lowercase ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
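    # Resolve a repo-relative path to its Hub download URL and stream it over HTTP.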
def __magic_name__ ( self : Union[str, Any] , __lowercase : str , __lowercase : str = "rb" , **__lowercase : Optional[Any] , ) -> int:
if not isinstance(self.repo_info , __lowercase ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
SCREAMING_SNAKE_CASE__ : int =hf_hub_url(self.repo_info.id , __lowercase , revision=self.repo_info.sha )
return fsspec.open(
__lowercase , mode=__lowercase , headers=get_authentication_headers_for_url(__lowercase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def __magic_name__ ( self : int , __lowercase : Optional[int] , **__lowercase : Any ) -> Union[str, Any]:
self._get_dirs()
SCREAMING_SNAKE_CASE__ : Dict =self._strip_protocol(__lowercase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__lowercase )
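    # List the direct children of a directory from the cache: info dicts when
    # detail=True, otherwise the sorted child names.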
def __magic_name__ ( self : Dict , __lowercase : int , __lowercase : List[Any]=False , **__lowercase : str ) -> Tuple:
self._get_dirs()
SCREAMING_SNAKE_CASE__ : Optional[Any] =PurePosixPath(path.strip('''/''' ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] ={}
for p, f in self.dir_cache.items():
SCREAMING_SNAKE_CASE__ : str =PurePosixPath(p.strip('''/''' ) )
SCREAMING_SNAKE_CASE__ : List[Any] =p.parent
if root == path:
SCREAMING_SNAKE_CASE__ : Optional[Any] =f
SCREAMING_SNAKE_CASE__ : List[Any] =list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
| 665 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , __lowercase : Optional[int] , __lowercase : str=13 , __lowercase : int=10 , __lowercase : List[Any]=3 , __lowercase : List[str]=2 , __lowercase : int=2 , __lowercase : Dict=True , __lowercase : Optional[Any]=True , __lowercase : int=32 , __lowercase : List[Any]=5 , __lowercase : Union[str, Any]=4 , __lowercase : Any=37 , __lowercase : Optional[Any]="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : Tuple=0.1 , __lowercase : Dict=10 , __lowercase : int=0.02 , __lowercase : str="divided_space_time" , __lowercase : Union[str, Any]=None , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : int =parent
SCREAMING_SNAKE_CASE__ : List[str] =batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_size
SCREAMING_SNAKE_CASE__ : List[Any] =num_channels
SCREAMING_SNAKE_CASE__ : int =patch_size
SCREAMING_SNAKE_CASE__ : Tuple =num_frames
SCREAMING_SNAKE_CASE__ : List[Any] =is_training
SCREAMING_SNAKE_CASE__ : List[str] =use_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size
SCREAMING_SNAKE_CASE__ : str =num_hidden_layers
SCREAMING_SNAKE_CASE__ : int =num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] =hidden_act
SCREAMING_SNAKE_CASE__ : Dict =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] =attention_type
SCREAMING_SNAKE_CASE__ : Union[str, Any] =initializer_range
SCREAMING_SNAKE_CASE__ : Any =scope
SCREAMING_SNAKE_CASE__ : int =num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
SCREAMING_SNAKE_CASE__ : List[str] =(image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : str =(num_frames) * self.num_patches_per_frame + 1
def __magic_name__ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[Any] =None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : int =self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : int ) -> Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
SCREAMING_SNAKE_CASE__ : List[Any] =self.num_labels
return config
def __magic_name__ ( self : int , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[Any] ) -> int:
SCREAMING_SNAKE_CASE__ : Tuple =TimesformerModel(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str =TimesformerForVideoClassification(__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase )
# verify the logits shape
SCREAMING_SNAKE_CASE__ : Tuple =torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __lowercase )
def __magic_name__ ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =config_and_inputs
SCREAMING_SNAKE_CASE__ : Any ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
snake_case_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
snake_case_ = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def __magic_name__ ( self : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Dict =TimesformerModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] =ConfigTester(
self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 )
def __magic_name__ ( self : str , __lowercase : Dict , __lowercase : str , __lowercase : Optional[int]=False ) -> int:
SCREAMING_SNAKE_CASE__ : str =copy.deepcopy(__lowercase )
if return_labels:
if model_class in get_values(__lowercase ):
SCREAMING_SNAKE_CASE__ : Any =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
return inputs_dict
def __magic_name__ ( self : List[Any] ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='''TimeSformer does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ) -> Optional[int]:
pass
def __magic_name__ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str =model_class(__lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Any =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) )
def __magic_name__ ( self : Any ) -> Any:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase )
SCREAMING_SNAKE_CASE__ : str =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : List[str] =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Dict =['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def __magic_name__ ( self : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__lowercase )
@slow
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> List[str]:
if not self.has_attentions:
pass
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] =True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.seq_length
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.num_frames
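                # TimeSformer's divided attention operates per frame over
                # num_patches_per_frame + 1 tokens; since seq_length = num_frames * patches + 1,
                # seq_len // num_frames recovers the per-frame patch count checked below.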
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
SCREAMING_SNAKE_CASE__ : str =False
SCREAMING_SNAKE_CASE__ : Tuple =True
SCREAMING_SNAKE_CASE__ : Dict =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE__ : List[Any] =True
SCREAMING_SNAKE_CASE__ : List[str] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE__ : Optional[int] =True
SCREAMING_SNAKE_CASE__ : Union[str, Any] =True
SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) )
self.assertEqual(out_len + 1 , len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : List[str] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def __magic_name__ ( self : Tuple ) -> List[Any]:
def check_hidden_states_output(__lowercase : Tuple , __lowercase : Dict , __lowercase : Optional[int] ):
SCREAMING_SNAKE_CASE__ : List[Any] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : int =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__lowercase ) , __lowercase )
SCREAMING_SNAKE_CASE__ : int =self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Tuple =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : List[str] =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
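# Test fixture: download a short "eating spaghetti" clip from the Hub and return it
# as a list of per-frame numpy arrays.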
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''', filename='''eating_spaghetti.npy''', repo_type='''dataset''' )
SCREAMING_SNAKE_CASE__ : Any =np.load(UpperCamelCase__ )
return list(UpperCamelCase__ )
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : Any ) -> List[str]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : int =TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to(
__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =self.default_image_processor
SCREAMING_SNAKE_CASE__ : Tuple =prepare_video()
SCREAMING_SNAKE_CASE__ : Any =image_processor(video[:8] , return_tensors='''pt''' ).to(__lowercase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] =model(**__lowercase )
# verify the logits
SCREAMING_SNAKE_CASE__ : List[str] =torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , __lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 ) )
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
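# 0-1 BFS: single-source shortest paths on a graph whose edge weights are restricted
# to 0 or 1, using a deque in place of a priority queue for O(V + E) time.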
@dataclass
class __SCREAMING_SNAKE_CASE :
    destination_vertex: int
    weight: int
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , __lowercase : int ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : list[list[Edge]] =[[] for _ in range(__lowercase )]
SCREAMING_SNAKE_CASE__ : Dict =size
def __getitem__( self : Dict , __lowercase : int ) -> Iterator[Edge]:
return iter(self._graph[vertex] )
@property
def __magic_name__ ( self : List[str] ) -> str:
return self._size
def __magic_name__ ( self : Tuple , __lowercase : int , __lowercase : int , __lowercase : int ) -> List[Any]:
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(__lowercase , __lowercase ) )
def __magic_name__ ( self : str , __lowercase : int , __lowercase : int ) -> int | None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =deque([start_vertex] )
SCREAMING_SNAKE_CASE__ : list[int | None] =[None] * self.size
SCREAMING_SNAKE_CASE__ : Dict =0
while queue:
SCREAMING_SNAKE_CASE__ : List[str] =queue.popleft()
SCREAMING_SNAKE_CASE__ : Optional[Any] =distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
SCREAMING_SNAKE_CASE__ : str =current_distance + edge.weight
SCREAMING_SNAKE_CASE__ : Optional[int] =distances[edge.destination_vertex]
if (
isinstance(__lowercase , __lowercase )
and new_distance >= dest_vertex_distance
):
continue
SCREAMING_SNAKE_CASE__ : List[Any] =new_distance
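                # Deque discipline that replaces the priority queue: a 0-weight edge does
                # not increase the distance, so its endpoint is explored first (front of
                # the deque); a 1-weight edge goes to the back.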
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
a_ = {
'bert-base-uncased': 5_1_2,
'bert-large-uncased': 5_1_2,
'bert-base-cased': 5_1_2,
'bert-large-cased': 5_1_2,
'bert-base-multilingual-uncased': 5_1_2,
'bert-base-multilingual-cased': 5_1_2,
'bert-base-chinese': 5_1_2,
'bert-base-german-cased': 5_1_2,
'bert-large-uncased-whole-word-masking': 5_1_2,
'bert-large-cased-whole-word-masking': 5_1_2,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-base-cased-finetuned-mrpc': 5_1_2,
'bert-base-german-dbmdz-cased': 5_1_2,
'bert-base-german-dbmdz-uncased': 5_1_2,
'TurkuNLP/bert-base-finnish-cased-v1': 5_1_2,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_1_2,
'wietsedv/bert-base-dutch-cased': 5_1_2,
}
a_ = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_INIT_CONFIGURATION
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = BertTokenizer
def __init__( self : int , __lowercase : Union[str, Any]=None , __lowercase : Tuple=None , __lowercase : str=True , __lowercase : Optional[Any]="[UNK]" , __lowercase : Tuple="[SEP]" , __lowercase : Any="[PAD]" , __lowercase : List[Any]="[CLS]" , __lowercase : Union[str, Any]="[MASK]" , __lowercase : Tuple=True , __lowercase : str=None , **__lowercase : Any , ) -> Optional[Any]:
super().__init__(
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , tokenize_chinese_chars=__lowercase , strip_accents=__lowercase , **__lowercase , )
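        # Keep the serialized backend normalizer in sync with the arguments passed here:
        # if lowercasing, accent stripping, or CJK handling differ, rebuild it below.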
SCREAMING_SNAKE_CASE__ : str =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , __lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , __lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , __lowercase ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE__ : int =getattr(__lowercase , normalizer_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE__ : Any =do_lower_case
SCREAMING_SNAKE_CASE__ : Any =strip_accents
SCREAMING_SNAKE_CASE__ : Dict =tokenize_chinese_chars
SCREAMING_SNAKE_CASE__ : Union[str, Any] =normalizer_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =do_lower_case
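    # Build model inputs for single sequences or pairs: [CLS] A [SEP] (+ B [SEP]).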
def __magic_name__ ( self : int , __lowercase : Optional[Any] , __lowercase : Union[str, Any]=None ) -> int:
        SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
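    # Token type ids for BERT-style pairs: 0 over [CLS] A [SEP], 1 over B [SEP].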
def __magic_name__ ( self : Union[str, Any] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ : List[str] =[self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def __magic_name__ ( self : List[Any] , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self._tokenizer.model.save(__lowercase , name=__lowercase )
return tuple(__lowercase )
| 665 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class __SCREAMING_SNAKE_CASE :
snake_case_ = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
snake_case_ = field(
default=lowerCamelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
snake_case_ = field(
default=lowerCamelCase , metadata={"""help""": """The column name of the images in the files."""} )
snake_case_ = field(default=lowerCamelCase , metadata={"""help""": """A folder containing the training data."""} )
snake_case_ = field(default=lowerCamelCase , metadata={"""help""": """A folder containing the validation data."""} )
snake_case_ = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
snake_case_ = field(
default=lowerCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
snake_case_ = field(
default=lowerCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def __magic_name__ ( self : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ : List[Any] ={}
if self.train_dir is not None:
SCREAMING_SNAKE_CASE__ : List[str] =self.train_dir
if self.validation_dir is not None:
SCREAMING_SNAKE_CASE__ : Dict =self.validation_dir
SCREAMING_SNAKE_CASE__ : int =data_files if data_files else None
@dataclass
class __SCREAMING_SNAKE_CASE :
snake_case_ = field(
default=lowerCamelCase , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
snake_case_ = field(
default=lowerCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
snake_case_ = field(
default=lowerCamelCase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
snake_case_ = field(
default=lowerCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
snake_case_ = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
snake_case_ = field(default=lowerCamelCase , metadata={"""help""": """Name or path of preprocessor config."""} )
snake_case_ = field(
default=lowerCamelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
snake_case_ = field(
default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
snake_case_ = field(
default=lowerCamelCase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = field(
default=1E-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
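# Collator for MAE pre-training: only pixel values are stacked, since the loss is
# computed from reconstructing the masked patches and no labels are needed.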
def _a( UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =torch.stack([example['''pixel_values'''] for example in examples] )
return {"pixel_values": pixel_values}
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mae''', UpperCamelCase__, UpperCamelCase__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Dict =training_args.get_process_log_level()
logger.setLevel(UpperCamelCase__ )
transformers.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE__ : List[str] =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE__ : Optional[Any] =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
SCREAMING_SNAKE_CASE__ : Tuple =load_dataset(
data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
# If we don't have a validation split, split off a percentage of train as validation.
SCREAMING_SNAKE_CASE__ : Optional[int] =None if '''validation''' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split, UpperCamelCase__ ) and data_args.train_val_split > 0.0:
SCREAMING_SNAKE_CASE__ : List[Any] =ds['''train'''].train_test_split(data_args.train_val_split )
SCREAMING_SNAKE_CASE__ : Optional[Any] =split['''train''']
SCREAMING_SNAKE_CASE__ : Optional[int] =split['''test''']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE__ : Optional[int] ={
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
SCREAMING_SNAKE_CASE__ : Optional[int] =ViTMAEConfig.from_pretrained(model_args.config_name, **UpperCamelCase__ )
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE__ : str =ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE__ : Tuple =ViTMAEConfig()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(f"New config: {config}" )
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
SCREAMING_SNAKE_CASE__ : Tuple =ViTImageProcessor.from_pretrained(model_args.image_processor_name, **UpperCamelCase__ )
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE__ : Any =ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE__ : Tuple =ViTImageProcessor()
# create model
if model_args.model_name_or_path:
SCREAMING_SNAKE_CASE__ : Any =ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=UpperCamelCase__, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
else:
logger.info('''Training new model from scratch''' )
SCREAMING_SNAKE_CASE__ : str =ViTMAEForPreTraining(UpperCamelCase__ )
if training_args.do_train:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =ds['''train'''].column_names
else:
SCREAMING_SNAKE_CASE__ : Dict =ds['''validation'''].column_names
if data_args.image_column_name is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] =data_args.image_column_name
elif "image" in column_names:
SCREAMING_SNAKE_CASE__ : Optional[int] ='''image'''
elif "img" in column_names:
SCREAMING_SNAKE_CASE__ : Tuple ='''img'''
else:
SCREAMING_SNAKE_CASE__ : str =column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
SCREAMING_SNAKE_CASE__ : Any =image_processor.size['''shortest_edge''']
else:
SCREAMING_SNAKE_CASE__ : int =(image_processor.size['''height'''], image_processor.size['''width'''])
SCREAMING_SNAKE_CASE__ : Tuple =Compose(
[
Lambda(lambda UpperCamelCase__ : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(UpperCamelCase__, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean, std=image_processor.image_std ),
] )
def preprocess_images(UpperCamelCase__ : Dict ):
SCREAMING_SNAKE_CASE__ : int =[transforms(UpperCamelCase__ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE__ : Tuple =ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(UpperCamelCase__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE__ : Tuple =(
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(UpperCamelCase__ )
# Compute absolute learning rate
SCREAMING_SNAKE_CASE__ : str =(
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =training_args.base_learning_rate * total_train_batch_size / 2_5_6
# Initialize our trainer
SCREAMING_SNAKE_CASE__ : List[Any] =Trainer(
model=UpperCamelCase__, args=UpperCamelCase__, train_dataset=ds['''train'''] if training_args.do_train else None, eval_dataset=ds['''validation'''] if training_args.do_eval else None, tokenizer=UpperCamelCase__, data_collator=UpperCamelCase__, )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE__ : str =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE__ : Dict =last_checkpoint
SCREAMING_SNAKE_CASE__ : Union[str, Any] =trainer.train(resume_from_checkpoint=UpperCamelCase__ )
trainer.save_model()
trainer.log_metrics('''train''', train_result.metrics )
trainer.save_metrics('''train''', train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
SCREAMING_SNAKE_CASE__ : List[Any] =trainer.evaluate()
trainer.log_metrics('''eval''', UpperCamelCase__ )
trainer.save_metrics('''eval''', UpperCamelCase__ )
# Write model card and (optionally) push to hub
SCREAMING_SNAKE_CASE__ : Dict ={
'''tasks''': '''masked-auto-encoding''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-auto-encoding'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCamelCase__ )
else:
trainer.create_model_card(**UpperCamelCase__ )
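# TPU entry point: HF example scripts conventionally expose an `_mp_fn(index)` hook
# for xla_spawn.py; the process-ordinal argument is unused here.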
def _a( UpperCamelCase__ : List[str] ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 665 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
a_ = False
a_ = False
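# Factory returning the command instance; wired into the subparser below via
# train_parser.set_defaults(func=...).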
def _a( UpperCamelCase__ : Namespace ):
'''simple docstring'''
return TrainCommand(UpperCamelCase__ )
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
@staticmethod
def __magic_name__ ( __lowercase : ArgumentParser ) -> Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' )
train_parser.add_argument(
'''--train_data''' , type=__lowercase , required=__lowercase , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=__lowercase , default=0 , help='''Column of the dataset csv file with example labels.''' )
train_parser.add_argument(
'''--column_text''' , type=__lowercase , default=1 , help='''Column of the dataset csv file with example texts.''' )
train_parser.add_argument(
'''--column_id''' , type=__lowercase , default=2 , help='''Column of the dataset csv file with example ids.''' )
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' )
train_parser.add_argument('''--validation_data''' , type=__lowercase , default='''''' , help='''path to validation dataset.''' )
train_parser.add_argument(
'''--validation_split''' , type=__lowercase , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
train_parser.add_argument('''--output''' , type=__lowercase , default='''./''' , help='''path to saved the trained model.''' )
train_parser.add_argument(
'''--task''' , type=__lowercase , default='''text_classification''' , help='''Task to train the model on.''' )
train_parser.add_argument(
'''--model''' , type=__lowercase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' )
train_parser.add_argument('''--train_batch_size''' , type=__lowercase , default=32 , help='''Batch size for training.''' )
train_parser.add_argument('''--valid_batch_size''' , type=__lowercase , default=64 , help='''Batch size for validation.''' )
train_parser.add_argument('''--learning_rate''' , type=__lowercase , default=3e-5 , help='''Learning rate.''' )
train_parser.add_argument('''--adam_epsilon''' , type=__lowercase , default=1e-08 , help='''Epsilon for Adam optimizer.''' )
train_parser.set_defaults(func=__lowercase )
def __init__( self : Tuple , __lowercase : Namespace ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Tuple =logging.get_logger('''transformers-cli/training''' )
SCREAMING_SNAKE_CASE__ : int ='''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output , exist_ok=__lowercase )
SCREAMING_SNAKE_CASE__ : Any =args.output
SCREAMING_SNAKE_CASE__ : str =args.column_label
SCREAMING_SNAKE_CASE__ : List[Any] =args.column_text
SCREAMING_SNAKE_CASE__ : Tuple =args.column_id
self.logger.info(F"Loading {args.task} pipeline for {args.model}" )
if args.task == "text_classification":
SCREAMING_SNAKE_CASE__ : List[str] =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"Loading dataset from {args.train_data}" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =None
if args.validation_data:
self.logger.info(F"Loading validation dataset from {args.validation_data}" )
SCREAMING_SNAKE_CASE__ : List[Any] =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =args.validation_split
SCREAMING_SNAKE_CASE__ : List[Any] =args.train_batch_size
SCREAMING_SNAKE_CASE__ : Any =args.valid_batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =args.learning_rate
SCREAMING_SNAKE_CASE__ : int =args.adam_epsilon
def __magic_name__ ( self : Any ) -> str:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __magic_name__ ( self : Optional[int] ) -> Tuple:
raise NotImplementedError
def __magic_name__ ( self : Dict ) -> List[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
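# Illustrative dispatch (a sketch with assumed, unmangled names, mirroring how
# transformers-cli wires subcommands; not runnable against the identifiers above):
#   parser = ArgumentParser("transformers-cli")
#   TrainCommand.register_subcommand(parser.add_subparsers())
#   args = parser.parse_args(["train", "--train_data", "data.csv"])
#   args.func(args).run()   # set_defaults(func=...) routes back to the factory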
| 665 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
a_ = {
'bert-base-uncased': 5_1_2,
'bert-large-uncased': 5_1_2,
'bert-base-cased': 5_1_2,
'bert-large-cased': 5_1_2,
'bert-base-multilingual-uncased': 5_1_2,
'bert-base-multilingual-cased': 5_1_2,
'bert-base-chinese': 5_1_2,
'bert-base-german-cased': 5_1_2,
'bert-large-uncased-whole-word-masking': 5_1_2,
'bert-large-cased-whole-word-masking': 5_1_2,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-base-cased-finetuned-mrpc': 5_1_2,
'bert-base-german-dbmdz-cased': 5_1_2,
'bert-base-german-dbmdz-uncased': 5_1_2,
'TurkuNLP/bert-base-finnish-cased-v1': 5_1_2,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_1_2,
'wietsedv/bert-base-dutch-cased': 5_1_2,
}
a_ = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_INIT_CONFIGURATION
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = BertTokenizer
def __init__( self : int , __lowercase : Union[str, Any]=None , __lowercase : Tuple=None , __lowercase : str=True , __lowercase : Optional[Any]="[UNK]" , __lowercase : Tuple="[SEP]" , __lowercase : Any="[PAD]" , __lowercase : List[Any]="[CLS]" , __lowercase : Union[str, Any]="[MASK]" , __lowercase : Tuple=True , __lowercase : str=None , **__lowercase : Any , ) -> Optional[Any]:
super().__init__(
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , tokenize_chinese_chars=__lowercase , strip_accents=__lowercase , **__lowercase , )
SCREAMING_SNAKE_CASE__ : str =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , __lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , __lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , __lowercase ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE__ : int =getattr(__lowercase , normalizer_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE__ : Any =do_lower_case
SCREAMING_SNAKE_CASE__ : Any =strip_accents
SCREAMING_SNAKE_CASE__ : Dict =tokenize_chinese_chars
SCREAMING_SNAKE_CASE__ : Union[str, Any] =normalizer_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =do_lower_case
def __magic_name__ ( self : int , __lowercase : Optional[Any] , __lowercase : Union[str, Any]=None ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __magic_name__ ( self : Union[str, Any] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ : List[str] =[self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__ ( self : List[Any] , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self._tokenizer.model.save(__lowercase , name=__lowercase )
return tuple(__lowercase )
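# Illustrative layout (standard BERT conventions; not part of the original
# module): for a sentence pair the two helpers above produce
#   tokens:          [CLS] A1 ... An [SEP] B1 ... Bm [SEP]
#   token_type_ids:    0   0 ...  0    0   1 ...  1    1
# i.e. segment 0 covers the first sequence plus both leading special tokens,
# and segment 1 covers the second sequence plus its trailing [SEP].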
| 665 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = KandinskyVaaImgaImgPipeline
snake_case_ = ["""image_embeds""", """negative_image_embeds""", """image"""]
snake_case_ = [
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
snake_case_ = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
snake_case_ = False
@property
def __magic_name__ ( self : List[str] ) -> Tuple:
return 32
@property
def __magic_name__ ( self : List[str] ) -> str:
return 32
@property
def __magic_name__ ( self : Any ) -> Optional[int]:
return self.time_input_dim
@property
def __magic_name__ ( self : List[Any] ) -> int:
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Tuple ) -> Optional[int]:
return 1_00
@property
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''in_channels''': 4,
            # Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE__ : Optional[int] =UNetaDConditionModel(**__lowercase )
return model
@property
def __magic_name__ ( self : Dict ) -> Any:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __magic_name__ ( self : Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] =VQModel(**self.dummy_movq_kwargs )
return model
def __magic_name__ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[str] =self.dummy_unet
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_movq
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''num_train_timesteps''': 10_00,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE__ : str =DDIMScheduler(**__lowercase )
SCREAMING_SNAKE_CASE__ : Any ={
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __magic_name__ ( self : str , __lowercase : Optional[Any] , __lowercase : Any=0 ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowercase )
# create init_image
SCREAMING_SNAKE_CASE__ : Optional[Any] =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ : Any =Image.fromarray(np.uinta(__lowercase ) ).convert('''RGB''' ).resize((2_56, 2_56) )
if str(__lowercase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ : Dict =torch.manual_seed(__lowercase )
else:
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
SCREAMING_SNAKE_CASE__ : str ={
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __magic_name__ ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] ='''cpu'''
SCREAMING_SNAKE_CASE__ : Tuple =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =output.images
SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipe(
**self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : List[str] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : Tuple =np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : int ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : str =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE__ : List[Any] ='''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE__ : Optional[int] =KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =KandinskyVaaImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Dict =pipeline.to(__lowercase )
pipeline.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =pipe_prior(
__lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ : int =output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__lowercase , __lowercase )
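        # Note (illustrative): with strength=0.2 the img2img pipeline keeps
        # ~80% of the init image's structure and only runs the final ~20% of
        # the scheduler timesteps, i.e. roughly 20 of the 100 requested steps.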
| 665 | 1 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def _a( UpperCamelCase__ : ndarray ):
'''simple docstring'''
return np.dot(UpperCamelCase__, UpperCamelCase__ )
class __SCREAMING_SNAKE_CASE :
    def __init__( self : Dict , *, __lowercase : float = np.inf , __lowercase : str = "linear" , __lowercase : float = 0.0 , ) -> None:
SCREAMING_SNAKE_CASE__ : List[str] =regularization
SCREAMING_SNAKE_CASE__ : Optional[Any] =gamma
if kernel == "linear":
SCREAMING_SNAKE_CASE__ : List[Any] =self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('''rbf kernel requires gamma''' )
if not isinstance(self.gamma , (float, int) ):
raise ValueError('''gamma must be float or int''' )
if not self.gamma > 0:
raise ValueError('''gamma must be > 0''' )
SCREAMING_SNAKE_CASE__ : List[str] =self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1 / (n_features * X.var()) (see wiki)
            # previously it was 1 / n_features
else:
SCREAMING_SNAKE_CASE__ : List[str] =F"Unknown kernel: {kernel}"
raise ValueError(__lowercase )
def __magic_name__ ( self : List[Any] , __lowercase : ndarray , __lowercase : ndarray ) -> float:
return np.dot(__lowercase , __lowercase )
def __magic_name__ ( self : Tuple , __lowercase : ndarray , __lowercase : ndarray ) -> float:
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def __magic_name__ ( self : Optional[int] , __lowercase : list[ndarray] , __lowercase : ndarray ) -> None:
SCREAMING_SNAKE_CASE__ : Tuple =observations
SCREAMING_SNAKE_CASE__ : Union[str, Any] =classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((SCREAMING_SNAKE_CASE__) , ) : Union[str, Any] =np.shape(__lowercase )
def to_minimize(__lowercase : ndarray ) -> float:
SCREAMING_SNAKE_CASE__ : Optional[Any] =0
((SCREAMING_SNAKE_CASE__) , ) : str =np.shape(__lowercase )
for i in range(__lowercase ):
for j in range(__lowercase ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =LinearConstraint(__lowercase , 0 , 0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =Bounds(0 , self.regularization )
SCREAMING_SNAKE_CASE__ : Dict =minimize(
__lowercase , np.ones(__lowercase ) , bounds=__lowercase , constraints=[ly_contraint] ).x
SCREAMING_SNAKE_CASE__ : List[Any] =l_star
# calculating mean offset of separation plane to points
SCREAMING_SNAKE_CASE__ : Optional[int] =0
for i in range(__lowercase ):
for j in range(__lowercase ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
SCREAMING_SNAKE_CASE__ : List[Any] =s / n
def __magic_name__ ( self : Union[str, Any] , __lowercase : ndarray ) -> int:
SCREAMING_SNAKE_CASE__ : Any =sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , __lowercase )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
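    # Illustrative kernel sanity checks (a minimal sketch, not part of the
    # original module): the linear kernel is a plain dot product; the rbf
    # kernel is exp(-gamma * ||x - y||^2) and therefore always lies in (0, 1].
    _va, _vb = np.array([1.0, 2.0]), np.array([3.0, 4.0])
    assert np.isclose(np.dot(_va, _vb), 11.0)  # linear kernel value
    assert 0.0 < np.exp(-0.5 * _a(_va - _vb)) <= 1.0  # rbf with gamma=0.5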
| 665 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
a_ = TypeVar('T')
class __SCREAMING_SNAKE_CASE ( Generic[T] ):
snake_case_ = 42 # Cache store of keys
snake_case_ = 42 # References of the keys in cache
snake_case_ = 10 # Maximum capacity of cache
def __init__( self : Dict , __lowercase : int ) -> None:
SCREAMING_SNAKE_CASE__ : Any =deque()
SCREAMING_SNAKE_CASE__ : str =set()
if not n:
SCREAMING_SNAKE_CASE__ : Optional[Any] =sys.maxsize
elif n < 0:
            raise ValueError('''n should be a non-negative integer; 0 or None means an unbounded cache.''' )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =n
def __magic_name__ ( self : List[str] , __lowercase : T ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
SCREAMING_SNAKE_CASE__ : int =self.dq_store.pop()
self.key_reference.remove(__lowercase )
else:
self.dq_store.remove(__lowercase )
self.dq_store.appendleft(__lowercase )
self.key_reference.add(__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> None:
for k in self.dq_store:
print(__lowercase )
def __repr__( self : List[Any] ) -> str:
return F"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
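# Referring a key that is already cached moves it to the front (illustrative
# extension of the demo above, under the same assumptions as the assert it follows):
lru_cache.refer(3)
assert str(lru_cache) == "LRUCache(4) => [3, 5, 4, 'A']"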
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
a_ = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
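# Note (illustrative): _LazyModule keeps `import` of this package cheap -- the
# backend-specific classes listed in the import structure are only imported on
# first attribute access, and names whose backend is unavailable are never
# registered, so accessing them fails at that point rather than at import time.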
| 665 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
a_ = list[list[float | int]]
def _a( UpperCamelCase__ : Matrix, UpperCamelCase__ : Matrix ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(size + 1 )] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : float
for row in range(UpperCamelCase__ ):
for col in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Tuple =matrix[row][col]
SCREAMING_SNAKE_CASE__ : Optional[int] =vector[row][0]
SCREAMING_SNAKE_CASE__ : Any =0
SCREAMING_SNAKE_CASE__ : Union[str, Any] =0
while row < size and col < size:
# pivoting
SCREAMING_SNAKE_CASE__ : Any =max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCamelCase__, UpperCamelCase__ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[pivot_row], augmented[row]
for rowa in range(row + 1, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] =augmented[rowa][col] / augmented[row][col]
SCREAMING_SNAKE_CASE__ : Tuple =0
for cola in range(col + 1, size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1, UpperCamelCase__ ):
for row in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[row][col] / augmented[col][col]
for cola in range(UpperCamelCase__, size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row], 1_0 )] for row in range(UpperCamelCase__ )
]
def _a( UpperCamelCase__ : list[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(UpperCamelCase__ )] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Matrix =[[0] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Matrix
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
for x_val, y_val in enumerate(UpperCamelCase__ ):
for col in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] =(x_val + 1) ** (size - col - 1)
SCREAMING_SNAKE_CASE__ : Dict =y_val
SCREAMING_SNAKE_CASE__ : Optional[int] =solve(UpperCamelCase__, UpperCamelCase__ )
def interpolated_func(UpperCamelCase__ : int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(UpperCamelCase__ ) )
return interpolated_func
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**1_0
)
def _a( UpperCamelCase__ : Callable[[int], int] = question_function, UpperCamelCase__ : int = 1_0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : list[int] =[func(UpperCamelCase__ ) for x_val in range(1, order + 1 )]
SCREAMING_SNAKE_CASE__ : list[Callable[[int], int]] =[
interpolate(data_points[:max_coeff] ) for max_coeff in range(1, order + 1 )
]
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : Callable[[int], int]
SCREAMING_SNAKE_CASE__ : int
for poly in polynomials:
SCREAMING_SNAKE_CASE__ : Any =1
while func(UpperCamelCase__ ) == poly(UpperCamelCase__ ):
x_val += 1
ret += poly(UpperCamelCase__ )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
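    # Illustrative check (assuming the intended name `solve` for the Gaussian
    # elimination helper above; not part of the original):
    #   solve([[1, 1], [1, -1]], [[3], [1]]) == [[2.0], [1.0]]
    # i.e. x + y = 3 and x - y = 1 give x = 2, y = 1 after partial pivoting
    # and back substitution.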
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a_ = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['OwlViTFeatureExtractor']
a_ = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 |
'''simple docstring'''
def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =0
SCREAMING_SNAKE_CASE__ : Union[str, Any] =len(UpperCamelCase__ ) - 1
while left <= right:
        # avoid division by zero during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
SCREAMING_SNAKE_CASE__ : Union[str, Any] =left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase__ ):
return None
SCREAMING_SNAKE_CASE__ : Optional[int] =sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =left
SCREAMING_SNAKE_CASE__ : Optional[Any] =point
elif point > right:
SCREAMING_SNAKE_CASE__ : Optional[int] =right
SCREAMING_SNAKE_CASE__ : Tuple =point
else:
if item < current_item:
SCREAMING_SNAKE_CASE__ : str =point - 1
else:
SCREAMING_SNAKE_CASE__ : Tuple =point + 1
return None
def _a( UpperCamelCase__ : List[str], UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
SCREAMING_SNAKE_CASE__ : Dict =left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase__ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
elif point > right:
return interpolation_search_by_recursion(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, point - 1 )
else:
return interpolation_search_by_recursion(
UpperCamelCase__, UpperCamelCase__, point + 1, UpperCamelCase__ )
def _a( UpperCamelCase__ : Dict ):
'''simple docstring'''
if collection != sorted(UpperCamelCase__ ):
        raise ValueError('''Collection must be sorted in ascending order''' )
return True
if __name__ == "__main__":
import sys
a_ = 0
if debug == 1:
a_ = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
try:
__assert_sorted(collection)
except ValueError:
            sys.exit('Sequence must be sorted in ascending order to apply interpolation search')
a_ = 6_7
a_ = interpolation_search(collection, target)
if result is not None:
print(F'''{target} found at positions: {result}''')
else:
print('Not found')
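    # Illustrative probe (using the values above): each step computes
    #   point = left + (item - A[left]) * (right - left) // (A[right] - A[left])
    # which is why interpolation search needs ~O(log log n) probes on
    # uniformly distributed data, versus O(log n) for binary search.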
| 665 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case_ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
snake_case_ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def __magic_name__ ( self : int , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : List[Any] ) -> int:
SCREAMING_SNAKE_CASE__ : List[Any] =TextaTextGenerationPipeline(model=__lowercase , tokenizer=__lowercase )
return generator, ["Something to write", "Something else"]
def __magic_name__ ( self : Union[str, Any] , __lowercase : Optional[Any] , __lowercase : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =generator('''Something there''' )
self.assertEqual(__lowercase , [{'''generated_text''': ANY(__lowercase )}] )
        # These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
SCREAMING_SNAKE_CASE__ : str =generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__lowercase )
self.assertEqual(
__lowercase , [
[{'''generated_text''': ANY(__lowercase )}, {'''generated_text''': ANY(__lowercase )}],
[{'''generated_text''': ANY(__lowercase )}, {'''generated_text''': ANY(__lowercase )}],
] , )
SCREAMING_SNAKE_CASE__ : Optional[int] =generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__lowercase )
self.assertEqual(
__lowercase , [
[{'''generated_text''': ANY(__lowercase )}, {'''generated_text''': ANY(__lowercase )}],
[{'''generated_text''': ANY(__lowercase )}, {'''generated_text''': ANY(__lowercase )}],
] , )
with self.assertRaises(__lowercase ):
generator(4 )
@require_torch
def __magic_name__ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ : List[str] =pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''pt''' )
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE__ : Tuple =generator('''Something there''' , do_sample=__lowercase )
self.assertEqual(__lowercase , [{'''generated_text''': ''''''}] )
SCREAMING_SNAKE_CASE__ : int =3
SCREAMING_SNAKE_CASE__ : Optional[int] =generator(
'''Something there''' , num_return_sequences=__lowercase , num_beams=__lowercase , )
SCREAMING_SNAKE_CASE__ : List[Any] =[
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': ''''''},
]
self.assertEqual(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =generator('''This is a test''' , do_sample=__lowercase , num_return_sequences=2 , return_tensors=__lowercase )
self.assertEqual(
__lowercase , [
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
] , )
SCREAMING_SNAKE_CASE__ : Any =generator.model.config.eos_token_id
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''<pad>'''
SCREAMING_SNAKE_CASE__ : Tuple =generator(
['''This is a test''', '''This is a second test'''] , do_sample=__lowercase , num_return_sequences=2 , batch_size=2 , return_tensors=__lowercase , )
self.assertEqual(
__lowercase , [
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
] , )
@require_tf
def __magic_name__ ( self : Any ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''tf''' )
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE__ : int =generator('''Something there''' , do_sample=__lowercase )
self.assertEqual(__lowercase , [{'''generated_text''': ''''''}] )
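        # Note (illustrative): with greedy or beam generation,
        # num_return_sequences must not exceed num_beams, which is why the
        # torch test above pairs num_return_sequences=3 with num_beams=3.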
| 665 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def __magic_name__ ( *__lowercase : int , **__lowercase : Optional[Any] ) -> Optional[Any]:
pass
@is_pipeline_test
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@require_torch
def __magic_name__ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
SCREAMING_SNAKE_CASE__ : List[str] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] )
        # The floating-point scores are so close that we hit floating-point error,
        # and the ordering is not guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(__lowercase ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
SCREAMING_SNAKE_CASE__ : int =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
] , )
@require_tf
def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
SCREAMING_SNAKE_CASE__ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Tuple =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
] , )
@slow
@require_torch
def __magic_name__ ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE__ : Any =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : List[str] =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def __magic_name__ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ : str =pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE__ : Optional[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
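        # Note (illustrative): zero-shot image classification softmaxes the
        # CLIP image/text similarities over the supplied candidate_labels, so
        # each result's scores sum to ~1.0 -- hence the ~0.333 three-way ties
        # produced by the random tiny model in the tests above.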
| 665 | 1 |
'''simple docstring'''
def _a( UpperCamelCase__ : str, UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple =[[False for _ in range(m + 1 )] for _ in range(n + 1 )]
SCREAMING_SNAKE_CASE__ : List[Any] =True
for i in range(UpperCamelCase__ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
SCREAMING_SNAKE_CASE__ : Optional[int] =True
if a[i].islower():
SCREAMING_SNAKE_CASE__ : List[Any] =True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
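    # Illustrative checks (not part of the original module): "daBcd" can be
    # abbreviated to "ABC" by capitalising 'a' and 'c' and deleting both 'd's,
    # while "dBcd" cannot, since a lowercase 'd' can never become 'A'.
    assert _a("daBcd", "ABC") is True
    assert _a("dBcd", "ABC") is False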
| 665 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case_ = JukeboxTokenizer
snake_case_ = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
def __magic_name__ ( self : Optional[int] ) -> str:
import torch
SCREAMING_SNAKE_CASE__ : List[str] =JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
SCREAMING_SNAKE_CASE__ : str =tokenizer(**self.metas )['''input_ids''']
# fmt: off
SCREAMING_SNAKE_CASE__ : str =[
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def __magic_name__ ( self : Any ) -> List[str]:
import torch
SCREAMING_SNAKE_CASE__ : int =JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer(**self.metas )['''input_ids''']
# fmt: off
SCREAMING_SNAKE_CASE__ : Optional[int] =[
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
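        # Note (illustrative): the tokenizer returns one input_ids tensor per
        # prior level; only the top-level prior is conditioned on the full
        # lyrics, so the remaining tensors reduce to the short metadata prefix.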
| 665 | 1 |
'''simple docstring'''
import argparse
a_ = 'docs/source/_static/js/custom.js'
def _a( UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
with open(UpperCamelCase__, encoding='''utf-8''', newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE__ : List[Any] =f.readlines()
SCREAMING_SNAKE_CASE__ : Optional[int] =0
# First let's put the right version
while not lines[index].startswith('''const stableVersion =''' ):
index += 1
SCREAMING_SNAKE_CASE__ : Tuple =f"const stableVersion = \"v{version}\"\n"
# Then update the dictionary
while not lines[index].startswith('''const versionMapping = {''' ):
index += 1
# We go until the end
while not lines[index].startswith('''}''' ):
index += 1
# We add the new version at the end
lines[index - 1] += f" \"v{version}\": \"v{version}\",\n"
with open(UpperCamelCase__, '''w''', encoding='''utf-8''', newline='''\n''' ) as f:
f.writelines(UpperCamelCase__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--version', help='Release version.')
a_ = parser.parse_args()
update_custom_js(args.version)
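    # Illustrative effect (assumed file contents; not part of the original
    # script): for --version 4.1.0 the line
    #   const stableVersion = "v4.0.0"
    # becomes
    #   const stableVersion = "v4.1.0"
    # and '"v4.1.0": "v4.1.0",' is appended at the end of the versionMapping block.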
| 665 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """gpt_neox"""
def __init__( self : List[Any] , __lowercase : Union[str, Any]=5_04_32 , __lowercase : int=61_44 , __lowercase : Tuple=44 , __lowercase : List[str]=64 , __lowercase : str=2_45_76 , __lowercase : Dict="gelu" , __lowercase : Tuple=0.25 , __lowercase : Tuple=1_00_00 , __lowercase : Tuple=0.0 , __lowercase : str=0.0 , __lowercase : List[Any]=0.1 , __lowercase : Dict=20_48 , __lowercase : Any=0.02 , __lowercase : Dict=1e-5 , __lowercase : List[Any]=True , __lowercase : str=0 , __lowercase : Optional[Any]=2 , __lowercase : Tuple=False , __lowercase : List[Any]=True , __lowercase : Optional[Any]=None , **__lowercase : Any , ) -> Dict:
super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =max_position_embeddings
SCREAMING_SNAKE_CASE__ : Any =hidden_size
SCREAMING_SNAKE_CASE__ : str =num_hidden_layers
SCREAMING_SNAKE_CASE__ : Any =num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] =intermediate_size
SCREAMING_SNAKE_CASE__ : Dict =hidden_act
SCREAMING_SNAKE_CASE__ : str =rotary_pct
SCREAMING_SNAKE_CASE__ : Optional[Any] =rotary_emb_base
SCREAMING_SNAKE_CASE__ : List[Any] =attention_dropout
SCREAMING_SNAKE_CASE__ : List[Any] =hidden_dropout
SCREAMING_SNAKE_CASE__ : str =classifier_dropout
SCREAMING_SNAKE_CASE__ : Any =initializer_range
SCREAMING_SNAKE_CASE__ : Dict =layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any =use_cache
SCREAMING_SNAKE_CASE__ : Tuple =tie_word_embeddings
SCREAMING_SNAKE_CASE__ : Tuple =use_parallel_residual
SCREAMING_SNAKE_CASE__ : Union[str, Any] =rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
                '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
def __magic_name__ ( self : Optional[Any] ) -> Optional[Any]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __lowercase ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
F"got {self.rope_scaling}" )
SCREAMING_SNAKE_CASE__ : int =self.rope_scaling.get('''type''' , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =self.rope_scaling.get('''factor''' , __lowercase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(__lowercase , __lowercase ) or rope_scaling_factor <= 1.0:
raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
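# Usage sketch (illustrative; this module uses relative imports, so access the
# class through the installed package rather than running this file directly):
#
#     from transformers import GPTNeoXConfig
#
#     config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
#     GPTNeoXConfig(rope_scaling={"factor": 2.0})  # raises ValueError: both `type` and `factor` are required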
| 665 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( PipelineTesterMixin , unittest.TestCase ):
snake_case_ = ShapEImgaImgPipeline
snake_case_ = ["""image"""]
snake_case_ = ["""image"""]
snake_case_ = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
snake_case_ = False
@property
def __magic_name__ ( self : List[Any] ) -> List[Any]:
return 32
@property
def __magic_name__ ( self : List[str] ) -> Optional[int]:
return 32
@property
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Dict ) -> Union[str, Any]:
return 8
@property
def __magic_name__ ( self : Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
SCREAMING_SNAKE_CASE__ : str =CLIPVisionModel(__lowercase )
return model
@property
def __magic_name__ ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ : int =CLIPImageProcessor(
crop_size=2_24 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , )
return image_processor
@property
def __magic_name__ ( self : List[str] ) -> Dict:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : str ={
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
SCREAMING_SNAKE_CASE__ : str =PriorTransformer(**__lowercase )
return model
@property
def __magic_name__ ( self : Tuple ) -> List[str]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE__ : str =ShapERenderer(**__lowercase )
return model
def __magic_name__ ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int =self.dummy_prior
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_encoder
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_processor
SCREAMING_SNAKE_CASE__ : Tuple =self.dummy_renderer
SCREAMING_SNAKE_CASE__ : int =HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=__lowercase , clip_sample=__lowercase , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE__ : Any ={
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __magic_name__ ( self : Any , __lowercase : List[str] , __lowercase : Any=0 ) -> Any:
SCREAMING_SNAKE_CASE__ : int =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
if str(__lowercase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ : List[str] =torch.manual_seed(__lowercase )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
SCREAMING_SNAKE_CASE__ : Any ={
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def __magic_name__ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ : int ='''cpu'''
SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : str =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Any =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =output.images[0]
SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : List[Any] =np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self : List[Any] ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __magic_name__ ( self : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch_device == '''cpu'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__lowercase , relax_max_difference=__lowercase , )
def __magic_name__ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =1
SCREAMING_SNAKE_CASE__ : List[str] =2
SCREAMING_SNAKE_CASE__ : Dict =self.get_dummy_inputs(__lowercase )
for key in inputs.keys():
if key in self.batch_params:
SCREAMING_SNAKE_CASE__ : Tuple =batch_size * [inputs[key]]
SCREAMING_SNAKE_CASE__ : List[Any] =pipe(**__lowercase , num_images_per_prompt=__lowercase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : Optional[Any] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : List[str] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
SCREAMING_SNAKE_CASE__ : Dict =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
SCREAMING_SNAKE_CASE__ : List[Any] =ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
SCREAMING_SNAKE_CASE__ : Tuple =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple =pipe(
__lowercase , generator=__lowercase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__lowercase , __lowercase )
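# Illustrative invocation (the path assumes the usual diffusers test layout);
# the @slow tests above only run when RUN_SLOW=1 is exported:
#     RUN_SLOW=1 python -m pytest tests/pipelines/shap_e/test_shap_e_img2img.py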
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
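# Self-contained sketch of the lazy-import pattern used above (illustrative;
# transformers' real _LazyModule also maps attributes to submodules and
# supports __dir__, pickling, etc.). Attribute access triggers the actual
# import, so importing the package itself stays cheap:
#
#     import importlib
#     import types
#
#     class MiniLazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for submodule, names in self._import_structure.items():
#                 if attr in names:
#                     return getattr(importlib.import_module(submodule), attr)
#             raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")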
| 665 | 1 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
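# Quick shape check (illustrative values): run one forward pass through the
# residual block with NHWC inputs. `deterministic=True` is the default, so no
# dropout RNG is needed at init/apply time.
if __name__ == "__main__":
    rng = jax.random.PRNGKey(0)
    block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
    hidden_states = jnp.zeros((1, 8, 8, 32), dtype=jnp.float32)
    temb = jnp.zeros((1, 128), dtype=jnp.float32)
    params = block.init(rng, hidden_states, temb)
    out = block.apply(params, hidden_states, temb)
    print(out.shape)  # (1, 8, 8, 64)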
| 665 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solve ``coefficient_matrix @ x = constant_matrix`` with the Jacobi
    iteration method, starting from ``init_val`` and running ``iterations``
    sweeps."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise ``ValueError`` unless each diagonal entry of the augmented matrix
    strictly dominates the sum of the other coefficients in its row."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
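# Worked example (illustrative): a strictly diagonally dominant 3x3 system.
# With enough sweeps the iterates approach the exact solution of A @ x = b.
if __name__ == "__main__":
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], iterations=50))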
| 665 | 1 |
'''simple docstring'''
def merge_sort(collection: list) -> list:
    """Pure-Python merge sort: recursively halve ``collection``, then merge
    the sorted halves back together."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into one sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = input('Enter numbers separated by a comma:\n').strip()
a_ = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
| 665 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    """Read the original JSON-lines entity vocabulary and map each entity name
    to its id, prefixing non-special entities with their language code."""
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
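# Example invocation (all paths are illustrative placeholders):
#
#     python convert_mluke_checkpoint.py \
#         --checkpoint_path mluke/pytorch_model.bin \
#         --metadata_path mluke/metadata.json \
#         --entity_vocab_path mluke/entity_vocab.jsonl \
#         --pytorch_dump_folder_path ./mluke-base \
#         --model_size base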
| 665 | 1 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a_ = logging.get_logger(__name__)
# TODO: upload to AWS
a_ = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
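# Usage sketch (illustrative): the defaults above mirror
# yjernite/retribert-base-uncased, so RetriBertConfig() reproduces that
# architecture; any field can be overridden by keyword:
#
#     from transformers import RetriBertConfig
#     config = RetriBertConfig(projection_dim=256, share_encoders=False)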
| 665 |
'''simple docstring'''
def abbreviation(a: str, b: str) -> bool:
    """Return True if string ``a`` can be turned into ``b`` by capitalizing
    some of its lowercase letters and deleting the remaining lowercase ones."""
    n = len(a)
    m = len(b)
    # dp[i][j]: can the first i characters of `a` produce the first j of `b`?
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
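# Worked example (HackerRank-style "Abbreviation" problem): "daBcd" becomes
# "ABC" by capitalizing 'a' and 'c' and deleting the leftover lowercase 'd's.
if __name__ == "__main__":
    print(abbreviation("daBcd", "ABC"))  # True
    print(abbreviation("dBcd", "ABC"))   # False: 'A' can never be produced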
| 665 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : int ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Dict ='''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
SCREAMING_SNAKE_CASE__ : int =dict(zip(__lowercase , range(len(__lowercase ) ) ) )
SCREAMING_SNAKE_CASE__ : Dict ={
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
SCREAMING_SNAKE_CASE__ : str ={
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_60_00,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
SCREAMING_SNAKE_CASE__ : Any =tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : Tuple =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE__ : Optional[int] =os.path.join(self.tmpdirname , __lowercase )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowercase ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowercase ) + '''\n''' )
# load decoder from hub
SCREAMING_SNAKE_CASE__ : Tuple ='''hf-internal-testing/ngram-beam-search-decoder'''
def __magic_name__ ( self : str , **__lowercase : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ : List[str] =self.add_kwargs_tokens_map.copy()
kwargs.update(__lowercase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
def __magic_name__ ( self : int , **__lowercase : Dict ) -> Any:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__lowercase )
def __magic_name__ ( self : int , **__lowercase : int ) -> int:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__lowercase )
def __magic_name__ ( self : str ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def __magic_name__ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[str] =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : List[str] =self.get_feature_extractor()
SCREAMING_SNAKE_CASE__ : Tuple =self.get_decoder()
SCREAMING_SNAKE_CASE__ : List[str] =WavaVecaProcessorWithLM(tokenizer=__lowercase , feature_extractor=__lowercase , decoder=__lowercase )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Optional[Any] =WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowercase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __lowercase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __lowercase )
def __magic_name__ ( self : Any ) -> Any:
SCREAMING_SNAKE_CASE__ : List[str] =WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
SCREAMING_SNAKE_CASE__ : Tuple =WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __magic_name__ ( self : List[Any] ) -> str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(__lowercase , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=__lowercase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __magic_name__ ( self : List[str] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] =self.get_feature_extractor()
SCREAMING_SNAKE_CASE__ : Dict =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : str =self.get_decoder()
SCREAMING_SNAKE_CASE__ : Tuple =WavaVecaProcessorWithLM(tokenizer=__lowercase , feature_extractor=__lowercase , decoder=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =floats_list((3, 10_00) )
SCREAMING_SNAKE_CASE__ : Dict =feature_extractor(__lowercase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] =processor(__lowercase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __magic_name__ ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Any =self.get_feature_extractor()
SCREAMING_SNAKE_CASE__ : Tuple =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : List[str] =self.get_decoder()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =WavaVecaProcessorWithLM(tokenizer=__lowercase , feature_extractor=__lowercase , decoder=__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] ='''This is a test string'''
SCREAMING_SNAKE_CASE__ : Any =processor(text=__lowercase )
SCREAMING_SNAKE_CASE__ : str =tokenizer(__lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __magic_name__ ( self : List[str] , __lowercase : Tuple=(2, 10, 16) , __lowercase : Union[str, Any]=77 ) -> str:
np.random.seed(__lowercase )
return np.random.rand(*__lowercase )
def __magic_name__ ( self : Optional[int] ) -> int:
SCREAMING_SNAKE_CASE__ : Tuple =self.get_feature_extractor()
SCREAMING_SNAKE_CASE__ : Optional[int] =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[int] =self.get_decoder()
SCREAMING_SNAKE_CASE__ : str =WavaVecaProcessorWithLM(tokenizer=__lowercase , feature_extractor=__lowercase , decoder=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =self._get_dummy_logits(shape=(10, 16) , seed=13 )
SCREAMING_SNAKE_CASE__ : int =processor.decode(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =decoder.decode_beams(__lowercase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def __magic_name__ ( self : int , __lowercase : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Dict =self.get_feature_extractor()
SCREAMING_SNAKE_CASE__ : Dict =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Tuple =self.get_decoder()
SCREAMING_SNAKE_CASE__ : List[str] =WavaVecaProcessorWithLM(tokenizer=__lowercase , feature_extractor=__lowercase , decoder=__lowercase )
SCREAMING_SNAKE_CASE__ : Any =self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
SCREAMING_SNAKE_CASE__ : List[str] =processor.batch_decode(__lowercase )
else:
with get_context(__lowercase ).Pool() as pool:
SCREAMING_SNAKE_CASE__ : int =processor.batch_decode(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Dict =list(__lowercase )
with get_context('''fork''' ).Pool() as p:
SCREAMING_SNAKE_CASE__ : Dict =decoder.decode_beams_batch(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =[], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__lowercase , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(__lowercase , decoded_processor.logit_score )
self.assertListEqual(__lowercase , decoded_processor.lm_score )
def __magic_name__ ( self : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE__ : List[Any] =self.get_feature_extractor()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Dict =self.get_decoder()
SCREAMING_SNAKE_CASE__ : int =WavaVecaProcessorWithLM(tokenizer=__lowercase , feature_extractor=__lowercase , decoder=__lowercase )
SCREAMING_SNAKE_CASE__ : Any =self._get_dummy_logits()
SCREAMING_SNAKE_CASE__ : int =15
SCREAMING_SNAKE_CASE__ : Union[str, Any] =-20.0
SCREAMING_SNAKE_CASE__ : Union[str, Any] =-4.0
SCREAMING_SNAKE_CASE__ : str =processor.batch_decode(
__lowercase , beam_width=__lowercase , beam_prune_logp=__lowercase , token_min_logp=__lowercase , )
SCREAMING_SNAKE_CASE__ : str =decoded_processor_out.text
SCREAMING_SNAKE_CASE__ : Optional[int] =list(__lowercase )
with get_context('''fork''' ).Pool() as pool:
SCREAMING_SNAKE_CASE__ : List[Any] =decoder.decode_beams_batch(
__lowercase , __lowercase , beam_width=__lowercase , beam_prune_logp=__lowercase , token_min_logp=__lowercase , )
SCREAMING_SNAKE_CASE__ : str =[d[0][0] for d in decoded_decoder_out]
SCREAMING_SNAKE_CASE__ : Optional[int] =[d[0][2] for d in decoded_decoder_out]
SCREAMING_SNAKE_CASE__ : Tuple =[d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , __lowercase )
self.assertTrue(np.array_equal(__lowercase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __lowercase , atol=1e-3 ) )
self.assertTrue(np.array_equal(__lowercase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __lowercase , atol=1e-3 ) )
def __magic_name__ ( self : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[Any] =self.get_feature_extractor()
SCREAMING_SNAKE_CASE__ : Tuple =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.get_decoder()
SCREAMING_SNAKE_CASE__ : Any =WavaVecaProcessorWithLM(tokenizer=__lowercase , feature_extractor=__lowercase , decoder=__lowercase )
SCREAMING_SNAKE_CASE__ : int =self._get_dummy_logits()
SCREAMING_SNAKE_CASE__ : Tuple =2.0
SCREAMING_SNAKE_CASE__ : List[str] =5.0
SCREAMING_SNAKE_CASE__ : Optional[Any] =-20.0
SCREAMING_SNAKE_CASE__ : Optional[int] =True
SCREAMING_SNAKE_CASE__ : Dict =processor.batch_decode(
__lowercase , alpha=__lowercase , beta=__lowercase , unk_score_offset=__lowercase , lm_score_boundary=__lowercase , )
SCREAMING_SNAKE_CASE__ : Optional[int] =decoded_processor_out.text
SCREAMING_SNAKE_CASE__ : int =list(__lowercase )
decoder.reset_params(
alpha=__lowercase , beta=__lowercase , unk_score_offset=__lowercase , lm_score_boundary=__lowercase , )
with get_context('''fork''' ).Pool() as pool:
SCREAMING_SNAKE_CASE__ : int =decoder.decode_beams_batch(
__lowercase , __lowercase , )
SCREAMING_SNAKE_CASE__ : List[str] =[d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
SCREAMING_SNAKE_CASE__ : List[str] =processor.decoder.model_container[processor.decoder._model_key]
SCREAMING_SNAKE_CASE__ : Tuple =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
SCREAMING_SNAKE_CASE__ : Any =os.listdir(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__lowercase , __lowercase )
def __magic_name__ ( self : Tuple ) -> int:
SCREAMING_SNAKE_CASE__ : Any =snapshot_download('''hf-internal-testing/processor_with_lm''' )
SCREAMING_SNAKE_CASE__ : Tuple =WavaVecaProcessorWithLM.from_pretrained(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =processor.decoder.model_container[processor.decoder._model_key]
SCREAMING_SNAKE_CASE__ : List[Any] =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
SCREAMING_SNAKE_CASE__ : int =os.listdir(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =os.listdir(__lowercase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__lowercase , __lowercase )
def __magic_name__ ( self : List[str] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] =AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
SCREAMING_SNAKE_CASE__ : Any =floats_list((3, 10_00) )
SCREAMING_SNAKE_CASE__ : Optional[int] =processor_wavaveca(__lowercase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ : List[str] =processor_auto(__lowercase , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
SCREAMING_SNAKE_CASE__ : Tuple =self._get_dummy_logits()
SCREAMING_SNAKE_CASE__ : Any =processor_wavaveca.batch_decode(__lowercase )
SCREAMING_SNAKE_CASE__ : int =processor_auto.batch_decode(__lowercase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __magic_name__ ( self : Any ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[str] =self.get_feature_extractor()
SCREAMING_SNAKE_CASE__ : str =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Any =self.get_decoder()
SCREAMING_SNAKE_CASE__ : List[str] =WavaVecaProcessorWithLM(tokenizer=__lowercase , feature_extractor=__lowercase , decoder=__lowercase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def __magic_name__ ( __lowercase : int , __lowercase : List[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ : int =[d[key] for d in offsets]
return retrieved_list
def __magic_name__ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
SCREAMING_SNAKE_CASE__ : List[Any] =self._get_dummy_logits()[0]
SCREAMING_SNAKE_CASE__ : str =processor.decode(__lowercase , output_word_offsets=__lowercase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__lowercase , __lowercase ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def __magic_name__ ( self : int ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
SCREAMING_SNAKE_CASE__ : Dict =self._get_dummy_logits()
SCREAMING_SNAKE_CASE__ : Optional[Any] =processor.batch_decode(__lowercase , output_word_offsets=__lowercase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__lowercase , __lowercase ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(__lowercase , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __magic_name__ ( self : int ) -> Optional[Any]:
import torch
SCREAMING_SNAKE_CASE__ : int =load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =iter(__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =next(__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
SCREAMING_SNAKE_CASE__ : Any =WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
SCREAMING_SNAKE_CASE__ : str =processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any =model(__lowercase ).logits.cpu().numpy()
SCREAMING_SNAKE_CASE__ : Optional[Any] =processor.decode(logits[0] , output_word_offsets=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
SCREAMING_SNAKE_CASE__ : Optional[Any] =[
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
SCREAMING_SNAKE_CASE__ : Optional[Any] ='''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(__lowercase , '''word''' ) ) , __lowercase )
self.assertEqual(''' '''.join(self.get_from_offsets(__lowercase , '''word''' ) ) , output.text )
# output times
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.tensor(self.get_from_offsets(__lowercase , '''start_time''' ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.tensor(self.get_from_offsets(__lowercase , '''end_time''' ) )
# fmt: off
SCREAMING_SNAKE_CASE__ : Any =torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__lowercase , __lowercase , atol=0.01 ) )
self.assertTrue(torch.allclose(__lowercase , __lowercase , atol=0.01 ) )
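# Usage sketch (illustrative) of the multiprocessing pattern exercised above:
# instantiate the processor *before* the pool so the decoder's LM is visible
# to forked worker processes.
#
#     from multiprocessing import get_context
#     from transformers import Wav2Vec2ProcessorWithLM
#
#     processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
#     with get_context("fork").Pool() as pool:
#         transcripts = processor.batch_decode(logits, pool).text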
| 665 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , __lowercase : Optional[Any] , __lowercase : List[str]=13 , __lowercase : int=7 , __lowercase : Tuple=True , __lowercase : Optional[Any]=True , __lowercase : List[Any]=True , __lowercase : str=True , __lowercase : Tuple=99 , __lowercase : Union[str, Any]=64 , __lowercase : Tuple=32 , __lowercase : Optional[Any]=5 , __lowercase : Tuple=4 , __lowercase : Optional[int]=37 , __lowercase : int="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : List[Any]=0.1 , __lowercase : List[Any]=5_12 , __lowercase : int=16 , __lowercase : Optional[int]=2 , __lowercase : Tuple=0.02 , __lowercase : List[str]=3 , __lowercase : List[str]=4 , __lowercase : List[str]=None , ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =parent
SCREAMING_SNAKE_CASE__ : Any =batch_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =seq_length
SCREAMING_SNAKE_CASE__ : Dict =is_training
SCREAMING_SNAKE_CASE__ : List[Any] =use_input_mask
SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_token_type_ids
SCREAMING_SNAKE_CASE__ : List[Any] =use_labels
SCREAMING_SNAKE_CASE__ : int =vocab_size
SCREAMING_SNAKE_CASE__ : str =hidden_size
SCREAMING_SNAKE_CASE__ : Any =embedding_size
SCREAMING_SNAKE_CASE__ : Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE__ : str =num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : str =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Any =max_position_embeddings
SCREAMING_SNAKE_CASE__ : Optional[Any] =type_vocab_size
SCREAMING_SNAKE_CASE__ : Any =type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =initializer_range
SCREAMING_SNAKE_CASE__ : str =num_labels
SCREAMING_SNAKE_CASE__ : List[str] =num_choices
SCREAMING_SNAKE_CASE__ : List[str] =scope
def __magic_name__ ( self : str ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : int =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : int =None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =None
SCREAMING_SNAKE_CASE__ : Optional[Any] =None
SCREAMING_SNAKE_CASE__ : Optional[int] =None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Tuple =ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self : List[str] ) -> Any:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
def __magic_name__ ( self : Any , __lowercase : Tuple , __lowercase : Any , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : Dict , __lowercase : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertModel(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(__lowercase , token_type_ids=__lowercase )
SCREAMING_SNAKE_CASE__ : str =model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__ ( self : Dict , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : Any , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : str =MegatronBertForMaskedLM(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : List[str] , __lowercase : Tuple , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : str , __lowercase : Any , __lowercase : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =MegatronBertForNextSentencePrediction(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : List[str] , __lowercase : Optional[int] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[Any] , __lowercase : str , __lowercase : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForPreTraining(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , next_sentence_label=__lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : Optional[int] , __lowercase : Tuple , __lowercase : Optional[int] , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any , __lowercase : Any , __lowercase : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] =MegatronBertForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : int , __lowercase : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.num_labels
SCREAMING_SNAKE_CASE__ : Dict =MegatronBertForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : int ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Tuple =self.num_labels
SCREAMING_SNAKE_CASE__ : int =MegatronBertForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : Dict , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Any , __lowercase : List[Any] , __lowercase : int , __lowercase : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : int =self.num_choices
SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertForMultipleChoice(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[int] =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Tuple =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self : str ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
snake_case_ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = True
# test_resize_embeddings = False
snake_case_ = False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['''next_sentence_label'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
def __magic_name__ ( self : Tuple ) -> List[Any]:
        self.model_tester = MegatronBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MegatronBertConfig , hidden_size=37 )
def __magic_name__ ( self : str ) -> Dict:
self.config_tester.run_common_tests()
    def __magic_name__ ( self : Tuple ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs )
    def __magic_name__ ( self : List[str] ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs )
    def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs )
    def __magic_name__ ( self : Optional[int] ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs )
    def __magic_name__ ( self : Dict ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs )
    def __magic_name__ ( self : int ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs )
    def __magic_name__ ( self : List[Any] ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs )
    def __magic_name__ ( self : Union[str, Any] ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs )
def _long_tensor( tok_lst ):
    '''Build a LongTensor on the active test device.'''
    return torch.tensor(
        tok_lst, dtype=torch.long, device=torch_device, )
TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
@unittest.skip('''Model is not available.''' )
def __magic_name__ ( self : Any ) -> Optional[int]:
        checkpoint = '''nvidia/megatron-bert-uncased-345m'''
        if "MYDIR" in os.environ:
            checkpoint = os.path.join(os.environ['''MYDIR'''] , checkpoint )
        model = MegatronBertModel.from_pretrained(checkpoint )
        model.to(torch_device )
        model.half()
        input_ids = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 10_24) )
        self.assertEqual(output.shape , expected_shape )
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3 ):
            for jj in range(3 ):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = '''ii={} jj={} a={} b={}'''.format(ii , jj , a , b )
                self.assertTrue(math.isclose(a , b , rel_tol=TOLERANCE , abs_tol=TOLERANCE ) , msg=msg )
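
# (Note on the comparison above: the checkpoint runs in half precision via model.half(),
# so elementwise checks use math.isclose with rel_tol = abs_tol = TOLERANCE = 1e-4
# instead of exact equality.)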
| 665 | 1 |
'''simple docstring'''
def solution( power : int = 1_0_0_0 ):
    '''Return the sum of the decimal digits of 2**power (Project Euler problem 16).'''
    num = 2**power
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
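
# A quick sanity check (my own example, not from the original file): solution(15) == 26,
# since 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.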
if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
    print('Sum of the digits is: ', result)
| 665 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def setUp( self ) -> None:
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
    @require_tpu
    def test_tpu( self ) -> None:
        distributed_args = F"\n    {self.test_dir}/xla_spawn.py\n    --num_cores 8\n    {self.test_file_path}\n    ".split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd , env=os.environ.copy() )
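
# Context (an assumption about the referenced helper, which is not part of this file):
# xla_spawn.py is the torch_xla launcher variant used by these tests; it fans the test
# script out across the requested 8 TPU cores, typically via
# torch_xla.distributed.xla_multiprocessing.spawn.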
| 665 | 1 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[Any] =0
def __magic_name__ ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE__ : List[str] =AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(__lowercase , __lowercase )
def __magic_name__ ( self : Optional[Any] ) -> List[Any]:
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(config_tmpfile , '''w''' ) )
            config = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(config , CLIPImageProcessor )
def __magic_name__ ( self : Any ) -> List[Any]:
# Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname ) / '''config.json'''
            json.dump(
                {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(config_tmpfile , '''w''' ) )
            config = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(config , CLIPImageProcessor )
def __magic_name__ ( self : Optional[Any] ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Any =CLIPConfig()
            # Create a dummy config file with image_processor_type
SCREAMING_SNAKE_CASE__ : Optional[int] =Path(__lowercase ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE__ : str =Path(__lowercase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
SCREAMING_SNAKE_CASE__ : str =AutoImageProcessor.from_pretrained(__lowercase ).to_dict()
config_dict.pop('''image_processor_type''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] =CLIPImageProcessor(**__lowercase )
# save in new folder
model_config.save_pretrained(__lowercase )
config.save_pretrained(__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =AutoImageProcessor.from_pretrained(__lowercase )
# make sure private variable is not incorrectly saved
SCREAMING_SNAKE_CASE__ : Any =json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(__lowercase , __lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =Path(__lowercase ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''' ) , )
SCREAMING_SNAKE_CASE__ : Dict =AutoImageProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def __magic_name__ ( self : Optional[int] ) -> List[Any]:
with self.assertRaisesRegex(
__lowercase , '''clip-base is not a local folder and is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE__ : Any =AutoImageProcessor.from_pretrained('''clip-base''' )
def __magic_name__ ( self : Optional[int] ) -> List[Any]:
with self.assertRaisesRegex(
__lowercase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
SCREAMING_SNAKE_CASE__ : List[Any] =AutoImageProcessor.from_pretrained(__lowercase , revision='''aaaaaa''' )
def __magic_name__ ( self : Tuple ) -> Optional[int]:
with self.assertRaisesRegex(
__lowercase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
SCREAMING_SNAKE_CASE__ : List[str] =AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __magic_name__ ( self : Dict ) -> str:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowercase ):
SCREAMING_SNAKE_CASE__ : Dict =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase )
SCREAMING_SNAKE_CASE__ : int =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =AutoImageProcessor.from_pretrained(__lowercase , trust_remote_code=__lowercase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __magic_name__ ( self : int ) -> Any:
try:
AutoConfig.register('''custom''' , __lowercase )
AutoImageProcessor.register(__lowercase , __lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowercase ):
AutoImageProcessor.register(__lowercase , __lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Optional[int] =Path(__lowercase ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE__ : Dict =Path(__lowercase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w''' ) )
SCREAMING_SNAKE_CASE__ : List[Any] =CustomImageProcessor.from_pretrained(__lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =AutoImageProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __magic_name__ ( self : str ) -> str:
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = True
try:
AutoConfig.register('''custom''' , __lowercase )
AutoImageProcessor.register(__lowercase , __lowercase )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE__ : Any =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE__ : Tuple =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE__ : str =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(__lowercase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
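
# Summary of the lookup order exercised above (a restatement of the tests, not new behavior):
# AutoImageProcessor reads preprocessor_config.json when present, accepts a legacy
# feature_extractor_type entry in place of image_processor_type, and can also infer the
# processor from the model's config.json when the type key is missing.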
| 665 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = ShapEImgaImgPipeline
snake_case_ = ["""image"""]
snake_case_ = ["""image"""]
snake_case_ = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
snake_case_ = False
@property
def __magic_name__ ( self : List[Any] ) -> List[Any]:
return 32
@property
def __magic_name__ ( self : List[str] ) -> Optional[int]:
return 32
@property
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Dict ) -> Union[str, Any]:
return 8
@property
def __magic_name__ ( self : Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
SCREAMING_SNAKE_CASE__ : str =CLIPVisionModel(__lowercase )
return model
@property
def __magic_name__ ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ : int =CLIPImageProcessor(
crop_size=2_24 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , )
return image_processor
@property
def __magic_name__ ( self : List[str] ) -> Dict:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : str ={
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
SCREAMING_SNAKE_CASE__ : str =PriorTransformer(**__lowercase )
return model
@property
def __magic_name__ ( self : Tuple ) -> List[str]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE__ : str =ShapERenderer(**__lowercase )
return model
def __magic_name__ ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int =self.dummy_prior
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_encoder
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_processor
SCREAMING_SNAKE_CASE__ : Tuple =self.dummy_renderer
SCREAMING_SNAKE_CASE__ : int =HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=__lowercase , clip_sample=__lowercase , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE__ : Any ={
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        input_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''image''': input_image,
            '''generator''': generator,
            '''num_inference_steps''': 1,
            '''frame_size''': 32,
            '''output_type''': '''np''',
        }
        return inputs
    def __magic_name__ ( self : List[str] ) -> str:
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self : List[Any] ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def __magic_name__ ( self : Optional[int] ) -> str:
        test_max_difference = torch_device == '''cpu'''
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
def __magic_name__ ( self : Dict ) -> List[str]:
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : Optional[Any] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def __magic_name__ ( self : int ) -> Dict:
        input_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/shap_e/test_shap_e_img2img_out.npy''' )
        pipe = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
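
# (Context for the asserted shapes above: Shap-E decodes each latent into a stack of
# rendered views rather than a single image, so images[0] holds 20 frames — 32x32 RGB in
# the fast test and 64x64 RGB in the slow test.)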
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
ELECTRON_CHARGE = 1.6021E-19 # units = C
def carrier_concentration( conductivity : float, electron_conc : float, mobility : float, ):
    '''Given sigma = n * e * mu, solve for whichever of conductivity, electron concentration, or mobility is passed as 0.'''
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
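
# A quick worked example (my own numbers, not from the original file): pass 0 for the
# quantity to be solved; here mobility is the unknown.
def _example() -> None:
    name, value = carrier_concentration(conductivity=25, electron_conc=1e20, mobility=0)
    assert name == "mobility"
    assert abs(value - 25 / (1e20 * ELECTRON_CHARGE)) < 1e-9  # approximately 1.5605 m^2/(V*s)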
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """gpt_bigcode"""
snake_case_ = ["""past_key_values"""]
snake_case_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self , vocab_size=5_02_57 , n_positions=10_24 , n_embd=7_68 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=5_02_56 , eos_token_id=5_02_56 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
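
# A minimal usage sketch (my own example; GPTBigCodeConfig is the upstream name of the
# config class defined above, and the values mirror its defaults):
#     config = GPTBigCodeConfig(n_layer=24, n_head=16, n_embd=2_048, multi_query=True)
# multi_query=True selects multi-query attention: all query heads share a single
# key/value head, which shrinks the inference-time KV cache by roughly a factor of n_head.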
| 665 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
a_ = None
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a_ = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
a_ = {
'google/bigbird-roberta-base': 4_0_9_6,
'google/bigbird-roberta-large': 4_0_9_6,
'google/bigbird-base-trivia-itc': 4_0_9_6,
}
a_ = '▁'
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = BigBirdTokenizer
snake_case_ = ["""input_ids""", """attention_mask"""]
snake_case_ = []
    def __init__( self , vocab_file=None , tokenizer_file=None , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , sep_token="[SEP]" , mask_token="[MASK]" , cls_token="[CLS]" , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
def __magic_name__ ( self : Optional[int] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ : List[Any] =[self.sep_token_id]
SCREAMING_SNAKE_CASE__ : int =[self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
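    # Token layout produced above (BERT-style), for reference:
    #     single sequence:   [CLS] A [SEP]
    #     pair of sequences: [CLS] A [SEP] B [SEP]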
def __magic_name__ ( self : Dict , __lowercase : List[int] , __lowercase : Optional[List[int]] = None , __lowercase : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__lowercase )) + [1]
return [1] + ([0] * len(__lowercase )) + [1] + ([0] * len(__lowercase )) + [1]
def __magic_name__ ( self : List[str] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Dict =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__ ( self : Dict , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__lowercase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE__ : int =os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
copyfile(self.vocab_file , __lowercase )
return (out_vocab_file,)
| 665 |
'''simple docstring'''
class MaxFenwickTree:
    '''Fenwick tree (binary indexed tree) for point updates and range-maximum queries.'''

    def __init__( self , size : int ) -> None:
        self.size = size
        self.arr = [0] * size   # current values
        self.tree = [0] * size  # per-node maxima

    @staticmethod
    def get_next( index : int ) -> int:
        # index of the next node whose range also covers this position
        return index | (index + 1)

    @staticmethod
    def get_prev( index : int ) -> int:
        # one position before the left border of the node ending at index
        return (index & (index + 1)) - 1

    def update( self , index : int , value : int ) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index ) + 1
            if current_left_border == index:
                # the node covers a single position
                self.tree[index] = value
            else:
                # the node covers [current_left_border, index]; recompute its maximum
                self.tree[index] = max(self.arr[index] , self.query(current_left_border , index ) )
            index = self.get_next(index )

    def query( self , left : int , right : int ) -> int:
        right -= 1  # because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right )
            if left <= current_left:
                result = max(result , self.tree[right] )
                right = current_left
            else:
                result = max(result , self.arr[right] )
                right -= 1
        return result
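
# A short sanity check (my own example, not part of the original file):
def _demo() -> None:
    ft = MaxFenwickTree(5)
    ft.update(2, 7)
    ft.update(4, 3)
    assert ft.query(0, 5) == 7  # max over the half-open range [0, 5)
    assert ft.query(3, 5) == 3  # max over [3, 5)
    ft.update(2, 1)  # lowering a value is handled by recomputing node maxima
    assert ft.query(0, 5) == 3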
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features):
    '''Pick a writer batch size that keeps Parquet row groups small for large binary/media features.'''
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
def __init__( self : Any , __lowercase : NestedDataStructureLike[PathLike] , __lowercase : Optional[NamedSplit] = None , __lowercase : Optional[Features] = None , __lowercase : str = None , __lowercase : bool = False , __lowercase : bool = False , __lowercase : Optional[int] = None , **__lowercase : str , ) -> Optional[int]:
super().__init__(
__lowercase , split=__lowercase , features=__lowercase , cache_dir=__lowercase , keep_in_memory=__lowercase , streaming=__lowercase , num_proc=__lowercase , **__lowercase , )
SCREAMING_SNAKE_CASE__ : Dict =path_or_paths if isinstance(__lowercase , __lowercase ) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE__ : Union[str, Any] =_PACKAGED_DATASETS_MODULES['''parquet'''][1]
SCREAMING_SNAKE_CASE__ : List[str] =Parquet(
cache_dir=__lowercase , data_files=__lowercase , features=__lowercase , hash=__lowercase , **__lowercase , )
def __magic_name__ ( self : Optional[Any] ) -> List[Any]:
# Build iterable dataset
if self.streaming:
SCREAMING_SNAKE_CASE__ : Tuple =self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE__ : Any =None
SCREAMING_SNAKE_CASE__ : Any =None
SCREAMING_SNAKE_CASE__ : Tuple =None
SCREAMING_SNAKE_CASE__ : Tuple =None
self.builder.download_and_prepare(
download_config=__lowercase , download_mode=__lowercase , verification_mode=__lowercase , base_path=__lowercase , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE__ : Dict =self.builder.as_dataset(
split=self.split , verification_mode=__lowercase , in_memory=self.keep_in_memory )
return dataset
class __SCREAMING_SNAKE_CASE :
def __init__( self : Dict , __lowercase : Dataset , __lowercase : Union[PathLike, BinaryIO] , __lowercase : Optional[int] = None , **__lowercase : int , ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] =dataset
SCREAMING_SNAKE_CASE__ : str =path_or_buf
SCREAMING_SNAKE_CASE__ : int =batch_size or get_writer_batch_size(dataset.features )
SCREAMING_SNAKE_CASE__ : Optional[Any] =parquet_writer_kwargs
def __magic_name__ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ : Tuple =self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , '''wb+''' ) as buffer:
SCREAMING_SNAKE_CASE__ : Optional[int] =self._write(file_obj=__lowercase , batch_size=__lowercase , **self.parquet_writer_kwargs )
else:
SCREAMING_SNAKE_CASE__ : int =self._write(file_obj=self.path_or_buf , batch_size=__lowercase , **self.parquet_writer_kwargs )
return written
def __magic_name__ ( self : List[Any] , __lowercase : BinaryIO , __lowercase : int , **__lowercase : Tuple ) -> int:
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : Optional[int] =parquet_writer_kwargs.pop('''path_or_buf''' , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dataset.features.arrow_schema
SCREAMING_SNAKE_CASE__ : List[Any] =pq.ParquetWriter(__lowercase , schema=__lowercase , **__lowercase )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , __lowercase ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
SCREAMING_SNAKE_CASE__ : Dict =query_table(
table=self.dataset._data , key=slice(__lowercase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(__lowercase )
written += batch.nbytes
writer.close()
return written
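
# A minimal round-trip sketch (my own example; these public entry points delegate to the
# reader/writer classes above):
#     from datasets import Dataset
#     ds = Dataset.from_dict({"text": ["a", "b"]})
#     ds.to_parquet("out.parquet")               # writer path
#     ds2 = Dataset.from_parquet("out.parquet")  # reader path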
| 665 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = ["""vqvae"""]
def __init__( self : int , __lowercase : AutoencoderKL , __lowercase : UNetaDConditionModel , __lowercase : Mel , __lowercase : Union[DDIMScheduler, DDPMScheduler] , ) -> int:
super().__init__()
self.register_modules(unet=__lowercase , scheduler=__lowercase , mel=__lowercase , vqvae=__lowercase )
def __magic_name__ ( self : List[str] ) -> int:
        return 50 if isinstance(self.scheduler , DDIMScheduler ) else 10_00
@torch.no_grad()
def __call__( self : Dict , __lowercase : int = 1 , __lowercase : str = None , __lowercase : np.ndarray = None , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = None , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : float = 0 , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : torch.Tensor = None , __lowercase : torch.Tensor = None , __lowercase : Dict=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =steps or self.get_default_steps()
self.scheduler.set_timesteps(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
SCREAMING_SNAKE_CASE__ : Optional[int] =randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__lowercase , device=self.device , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =noise
SCREAMING_SNAKE_CASE__ : List[str] =None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Dict =self.mel.audio_slice_to_image(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
SCREAMING_SNAKE_CASE__ : int =(input_image / 2_55) * 2 - 1
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.vqvae.encode(torch.unsqueeze(__lowercase , 0 ) ).latent_dist.sample(
generator=__lowercase )[0]
SCREAMING_SNAKE_CASE__ : int =self.vqvae.config.scaling_factor * input_images
if start_step > 0:
SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , self.scheduler.timesteps[start_step - 1] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
SCREAMING_SNAKE_CASE__ : Optional[Any] =int(mask_start_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : Tuple =int(mask_end_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet , UNetaDConditionModel ):
SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase , __lowercase )['''sample''']
else:
SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase )['''sample''']
            if isinstance(self.scheduler , DDIMScheduler ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.scheduler.step(
model_output=__lowercase , timestep=__lowercase , sample=__lowercase , eta=__lowercase , generator=__lowercase , )['''prev_sample''']
else:
SCREAMING_SNAKE_CASE__ : Any =self.scheduler.step(
model_output=__lowercase , timestep=__lowercase , sample=__lowercase , generator=__lowercase , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] =mask[:, step, :, :mask_start]
if mask_end > 0:
SCREAMING_SNAKE_CASE__ : List[str] =mask[:, step, :, -mask_end:]
if self.vqvae is not None:
            # 0.18215 was the scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images )['''sample''']
        images = (images / 2 + 0.5).clamp(0 , 1 )
        images = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        images = (images * 2_55).round().astype('''uint8''' )
        images = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_ , mode='''RGB''' ).convert('''L''' ) for _ in images) )
        audios = [self.mel.image_to_audio(_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowercase ) )
@torch.no_grad()
def __magic_name__ ( self : Optional[int] , __lowercase : List[Image.Image] , __lowercase : int = 50 ) -> np.ndarray:
        assert isinstance(self.scheduler , DDIMScheduler )
self.scheduler.set_timesteps(__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
SCREAMING_SNAKE_CASE__ : str =(sample / 2_55) * 2 - 1
SCREAMING_SNAKE_CASE__ : str =torch.Tensor(__lowercase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
SCREAMING_SNAKE_CASE__ : Tuple =t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
SCREAMING_SNAKE_CASE__ : Any =self.scheduler.alphas_cumprod[t]
SCREAMING_SNAKE_CASE__ : int =(
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
SCREAMING_SNAKE_CASE__ : int =1 - alpha_prod_t
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.unet(__lowercase , __lowercase )['''sample''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(1 - alpha_prod_t_prev) ** 0.5 * model_output
SCREAMING_SNAKE_CASE__ : str =(sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
SCREAMING_SNAKE_CASE__ : Any =sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __magic_name__ ( __lowercase : torch.Tensor , __lowercase : torch.Tensor , __lowercase : float ) -> torch.Tensor:
SCREAMING_SNAKE_CASE__ : Optional[int] =acos(torch.dot(torch.flatten(__lowercase ) , torch.flatten(__lowercase ) ) / torch.norm(__lowercase ) / torch.norm(__lowercase ) )
return sin((1 - alpha) * theta ) * xa / sin(__lowercase ) + sin(alpha * theta ) * xa / sin(__lowercase )
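
# Note on the slerp above (a restatement, not new behavior): with theta the angle between
# the two tensors,
#     slerp(x0, x1, alpha) = sin((1 - alpha) * theta) / sin(theta) * x0
#                          + sin(alpha * theta) / sin(theta) * x1
# which interpolates along the arc between two noise tensors instead of the straight line
# of a lerp, preserving the norm statistics the diffusion model expects.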
| 665 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , apply_ocr=True , ) -> None:
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
def __magic_name__ ( self : List[Any] ) -> Dict:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
@property
def __magic_name__ ( self : Tuple ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ ( self : Union[str, Any] ) -> int:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''apply_ocr''' ) )
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def __magic_name__ ( self : Optional[int] ) -> List[str]:
pass
def __magic_name__ ( self : Any ) -> Any:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : List[Any] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Optional[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : int =image_processing(image_inputs[0] , return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
self.assertIsInstance(encoding.words , __lowercase )
self.assertIsInstance(encoding.boxes , __lowercase )
# Test batched
SCREAMING_SNAKE_CASE__ : Optional[Any] =image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def __magic_name__ ( self : Tuple ) -> Optional[int]:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : Optional[int] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : int =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Optional[int] =image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def __magic_name__ ( self : List[Any] ) -> Union[str, Any]:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : Any =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : List[str] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Optional[int] =image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def __magic_name__ ( self : Union[str, Any] ) -> Union[str, Any]:
# with apply_OCR = True
SCREAMING_SNAKE_CASE__ : int =LayoutLMvaImageProcessor()
from datasets import load_dataset
SCREAMING_SNAKE_CASE__ : Union[str, Any] =load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
SCREAMING_SNAKE_CASE__ : int =Image.open(ds[0]['''file'''] ).convert('''RGB''' )
SCREAMING_SNAKE_CASE__ : int =image_processing(__lowercase , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE__ : Optional[int] =[['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 
6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __lowercase )
self.assertListEqual(encoding.boxes , __lowercase )
# with apply_OCR = False
SCREAMING_SNAKE_CASE__ : str =LayoutLMvaImageProcessor(apply_ocr=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =image_processing(__lowercase , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
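# A minimal, self-contained sketch of the two modes exercised above (a sketch only:
# the image path is a placeholder, and Tesseract must be installed for the default
# apply_ocr=True path):
def _layoutlmva_processor_sketch(image_path='''document.png''' ):
    from PIL import Image
    image = Image.open(image_path ).convert('''RGB''' )
    # default apply_ocr=True: returns pixel_values plus OCR'd words and boxes
    with_ocr = LayoutLMvaImageProcessor()(image , return_tensors='''pt''' )
    # apply_ocr=False: returns pixel_values only; words/boxes must come from the caller
    without_ocr = LayoutLMvaImageProcessor(apply_ocr=False )(image , return_tensors='''pt''' )
    return with_ocr, without_ocr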
| 665 |
'''simple docstring'''
from math import isqrt
def calculate_prime_numbers( max_number : int ):
    '''Sieve of Eratosthenes: return every prime strictly below max_number.'''
    is_prime =[True] * max_number
    for i in range(2, isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2, max_number, i ):
                is_prime[j] =False
    return [i for i in range(2, max_number ) if is_prime[i]]
def solution( max_number : int = 1_0**8 ):
    '''Count the composites below max_number with exactly two (not necessarily
    distinct) prime factors, using a two-pointer sweep over the primes.'''
    prime_numbers =calculate_prime_numbers(max_number // 2 )
    semiprimes_count =0
    left =0
    right =len(prime_numbers ) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
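    # Worked check of the two-pointer sweep: the composites below 30 with exactly
    # two (not necessarily distinct) prime factors are 4, 6, 9, 10, 14, 15, 21,
    # 22, 25 and 26 -- ten in total.
    assert solution(30 ) == 10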
| 665 | 1 |
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
a_ = logging.getLogger()
a_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
def __magic_name__ ( self : int , __lowercase : Optional[Any] ) -> Optional[int]:
os.makedirs(__lowercase , exist_ok=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] ={'''source''': '''What is love ?''', '''target''': '''life'''}
SCREAMING_SNAKE_CASE__ : Dict ={'''train''': 12, '''val''': 2, '''test''': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''\n'''.join([contents[field]] * n_lines[split] )
with open(os.path.join(__lowercase , F"{split}.{field}" ) , '''w''' ) as f:
f.write(__lowercase )
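    # The helper above yields the flat layout that finetune_rag consumes -- with the
    # n_lines mapping used here: data/train.{source,target} holding 12 lines each,
    # and data/val.* and data/test.* holding 2 lines each.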
def __magic_name__ ( self : Optional[Any] , __lowercase : int , __lowercase : str = "pytorch" ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : Optional[int] =os.path.join(__lowercase , '''output''' )
SCREAMING_SNAKE_CASE__ : Tuple =os.path.join(__lowercase , '''data''' )
self._create_dummy_data(data_dir=__lowercase )
SCREAMING_SNAKE_CASE__ : str =F"\n --data_dir {data_dir} \\n --output_dir {output_dir} \\n --model_name_or_path facebook/rag-sequence-base \\n --model_type rag_sequence \\n --do_train \\n --do_predict \\n --n_val -1 \\n --val_check_interval 1.0 \\n --train_batch_size 2 \\n --eval_batch_size 1 \\n --max_source_length 25 \\n --max_target_length 25 \\n --val_max_target_length 25 \\n --test_max_target_length 25 \\n --label_smoothing 0.1 \\n --dropout 0.1 \\n --attention_dropout 0.1 \\n --weight_decay 0.001 \\n --adam_epsilon 1e-08 \\n --max_grad_norm 0.1 \\n --lr_scheduler polynomial \\n --learning_rate 3e-04 \\n --num_train_epochs 1 \\n --warmup_steps 4 \\n --gradient_accumulation_steps 1 \\n --distributed-port 8787 \\n --use_dummy_dataset 1 \\n --distributed_retriever {distributed_retriever} \\n ".split()
if gpus > 0:
testargs.append(F"--gpus={gpus}" )
if is_apex_available():
testargs.append('''--fp16''' )
else:
testargs.append('''--gpus=0''' )
testargs.append('''--distributed_backend=ddp_cpu''' )
testargs.append('''--num_processes=2''' )
SCREAMING_SNAKE_CASE__ : int =[sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(__lowercase , env=self.get_env() )
SCREAMING_SNAKE_CASE__ : Optional[Any] =os.path.join(__lowercase , '''metrics.json''' )
with open(__lowercase ) as f:
SCREAMING_SNAKE_CASE__ : Any =json.load(__lowercase )
return result
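    # `metrics.json` is written out by the finetune_rag script at the end of the run;
    # the dict returned here feeds the `result['test'][0]['test_avg_em']` checks below.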
@require_torch_gpu
def __magic_name__ ( self : List[str] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Dict =self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_multi_gpu
def __magic_name__ ( self : str ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Tuple =self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_gpu
@require_ray
def __magic_name__ ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_multi_gpu
@require_ray
def __magic_name__ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int =self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
| 665 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = SpeechTaTokenizer
snake_case_ = False
snake_case_ = True
def __magic_name__ ( self : int ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ : Optional[Any] =SpeechTaTokenizer(__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =AddedToken('''<mask>''' , lstrip=__lowercase , rstrip=__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self : Dict , __lowercase : int ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] ='''this is a test'''
SCREAMING_SNAKE_CASE__ : int ='''this is a test'''
return input_text, output_text
def __magic_name__ ( self : List[Any] , __lowercase : int , __lowercase : Optional[Any]=False , __lowercase : Union[str, Any]=20 , __lowercase : Any=5 ) -> Any:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.get_input_output_texts(__lowercase )
SCREAMING_SNAKE_CASE__ : str =tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
SCREAMING_SNAKE_CASE__ : str =tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
return text, ids
def __magic_name__ ( self : Dict ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[int] ='''<pad>'''
SCREAMING_SNAKE_CASE__ : Optional[int] =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def __magic_name__ ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-4] , '''œ''' )
self.assertEqual(vocab_keys[-2] , '''<mask>''' )
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
self.assertEqual(len(__lowercase ) , 81 )
def __magic_name__ ( self : Dict ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def __magic_name__ ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE__ : str =self.get_tokenizers(do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Any =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
SCREAMING_SNAKE_CASE__ : int =['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.add_tokens(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , len(__lowercase ) )
self.assertEqual(__lowercase , all_size + len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__lowercase )
self.assertGreaterEqual(len(__lowercase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
SCREAMING_SNAKE_CASE__ : str ={'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
SCREAMING_SNAKE_CASE__ : int =tokenizer.add_special_tokens(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : int =len(__lowercase )
self.assertNotEqual(__lowercase , 0 )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , len(__lowercase ) )
self.assertEqual(__lowercase , all_size_a + len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__lowercase )
self.assertGreaterEqual(len(__lowercase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def __magic_name__ ( self : Optional[Any] ) -> Any:
pass
def __magic_name__ ( self : List[str] ) -> List[Any]:
pass
def __magic_name__ ( self : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Dict =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.tokenize('''This is a test''' )
# fmt: off
self.assertListEqual(__lowercase , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(__lowercase )
# fmt: off
self.assertListEqual(__lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.convert_ids_to_tokens(__lowercase )
self.assertListEqual(
__lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
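    # In effect the fixture is a character-level SentencePiece model: each character
    # (plus the SPIECE_UNDERLINE word-boundary marker) is one token, and strings
    # outside the vocab -- like '92000' above -- round-trip through <unk>.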
@slow
def __magic_name__ ( self : List[str] ) -> List[str]:
# Use custom sequence because this tokenizer does not handle numbers.
SCREAMING_SNAKE_CASE__ : List[Any] =[
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
SCREAMING_SNAKE_CASE__ : str ={
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__lowercase , )
| 665 | 1 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
a_ = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : Dict , __lowercase : str , __lowercase : bool , __lowercase : str = None , __lowercase : list = None ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =None
SCREAMING_SNAKE_CASE__ : int =os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
SCREAMING_SNAKE_CASE__ : Any =os.path.abspath('''examples''' )
for item in os.listdir(__lowercase ):
if item not in EXCLUDE_EXAMPLES:
SCREAMING_SNAKE_CASE__ : Any =os.path.join(__lowercase , __lowercase )
if os.path.isfile(__lowercase ) and ".py" in item_path:
with self.subTest(
tested_script=__lowercase , feature_script=__lowercase , tested_section='''main()''' if parser_only else '''training_function()''' , ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =compare_against_test(
os.path.join(__lowercase , __lowercase ) , __lowercase , __lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] ='''\n'''.join(__lowercase )
if special_strings is not None:
for string in special_strings:
SCREAMING_SNAKE_CASE__ : List[str] =diff.replace(__lowercase , '''''' )
self.assertEqual(__lowercase , '''''' )
def __magic_name__ ( self : int ) -> int:
self.one_complete_example('''complete_nlp_example.py''' , __lowercase )
self.one_complete_example('''complete_nlp_example.py''' , __lowercase )
def __magic_name__ ( self : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] =[
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __lowercase , __lowercase , __lowercase )
self.one_complete_example('''complete_cv_example.py''' , __lowercase , __lowercase , __lowercase )
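    # `compare_against_test` diffs each complete_* script against its by_feature
    # counterparts; the special strings above appear to be the tracking-only lines
    # allowed to differ, so after stripping them the expected diff is the empty string.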
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = False
@classmethod
def __magic_name__ ( cls : Dict ) -> int:
super().setUpClass()
SCREAMING_SNAKE_CASE__ : List[Any] =tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : Tuple =os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
SCREAMING_SNAKE_CASE__ : Optional[int] =['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def __magic_name__ ( cls : Union[str, Any] ) -> Union[str, Any]:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def __magic_name__ ( self : Dict ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict =F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def __magic_name__ ( self : List[Any] ) -> int:
SCREAMING_SNAKE_CASE__ : List[str] =F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
SCREAMING_SNAKE_CASE__ : List[Any] =run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def __magic_name__ ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split()
SCREAMING_SNAKE_CASE__ : str =run_command(self._launch_args + testargs , return_stdout=__lowercase )
self.assertNotIn('''epoch 0:''' , __lowercase )
self.assertIn('''epoch 1:''' , __lowercase )
def __magic_name__ ( self : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : str =F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =run_command(self._launch_args + testargs , return_stdout=__lowercase )
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.cuda.device_count()
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __lowercase )
self.assertIn('''epoch 1:''' , __lowercase )
else:
self.assertIn('''epoch 0:''' , __lowercase )
self.assertIn('''epoch 1:''' , __lowercase )
@slow
def __magic_name__ ( self : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Optional[Any] ='''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
SCREAMING_SNAKE_CASE__ : List[Any] =run_command(self._launch_args + testargs , return_stdout=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =re.findall('''({.+})''' , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =[r for r in results if '''accuracy''' in r][-1]
SCREAMING_SNAKE_CASE__ : Tuple =ast.literal_eval(__lowercase )
self.assertGreaterEqual(results['''accuracy'''] , 0.75 )
def __magic_name__ ( self : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : List[str] =['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __magic_name__ ( self : Tuple ) -> str:
with tempfile.TemporaryDirectory() as tmpdir:
SCREAMING_SNAKE_CASE__ : Optional[Any] =F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''tracking''' ) ) )
def __magic_name__ ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict =['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def __magic_name__ ( self : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ : str =['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
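# Every test above reduces to the same launch pattern (a sketch; the config path is
# wherever setUpClass pointed write_basic_config):
#   accelerate launch --config_file <tmpdir>/default_config.yml examples/by_feature/<script>.py [script args]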
| 665 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , __lowercase : Optional[int] , __lowercase : str=13 , __lowercase : int=10 , __lowercase : List[Any]=3 , __lowercase : List[str]=2 , __lowercase : int=2 , __lowercase : Dict=True , __lowercase : Optional[Any]=True , __lowercase : int=32 , __lowercase : List[Any]=5 , __lowercase : Union[str, Any]=4 , __lowercase : Any=37 , __lowercase : Optional[Any]="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : Tuple=0.1 , __lowercase : Dict=10 , __lowercase : int=0.02 , __lowercase : str="divided_space_time" , __lowercase : Union[str, Any]=None , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : int =parent
SCREAMING_SNAKE_CASE__ : List[str] =batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_size
SCREAMING_SNAKE_CASE__ : List[Any] =num_channels
SCREAMING_SNAKE_CASE__ : int =patch_size
SCREAMING_SNAKE_CASE__ : Tuple =num_frames
SCREAMING_SNAKE_CASE__ : List[Any] =is_training
SCREAMING_SNAKE_CASE__ : List[str] =use_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size
SCREAMING_SNAKE_CASE__ : str =num_hidden_layers
SCREAMING_SNAKE_CASE__ : int =num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] =hidden_act
SCREAMING_SNAKE_CASE__ : Dict =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] =attention_type
SCREAMING_SNAKE_CASE__ : Union[str, Any] =initializer_range
SCREAMING_SNAKE_CASE__ : Any =scope
SCREAMING_SNAKE_CASE__ : int =num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
SCREAMING_SNAKE_CASE__ : List[str] =(image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : str =(num_frames) * self.num_patches_per_frame + 1
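        # e.g. with the defaults above (image_size=10, patch_size=2, num_frames=2):
        # num_patches_per_frame = (10 // 2) ** 2 = 25, so seq_length = 2 * 25 + 1 = 51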
def __magic_name__ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[Any] =None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : int =self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : int ) -> Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
SCREAMING_SNAKE_CASE__ : List[Any] =self.num_labels
return config
def __magic_name__ ( self : int , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[Any] ) -> int:
SCREAMING_SNAKE_CASE__ : Tuple =TimesformerModel(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str =TimesformerForVideoClassification(__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase )
# verify the logits shape
SCREAMING_SNAKE_CASE__ : Tuple =torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __lowercase )
def __magic_name__ ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =config_and_inputs
SCREAMING_SNAKE_CASE__ : Any ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
snake_case_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
snake_case_ = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def __magic_name__ ( self : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Dict =TimesformerModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] =ConfigTester(
self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 )
def __magic_name__ ( self : str , __lowercase : Dict , __lowercase : str , __lowercase : Optional[int]=False ) -> int:
SCREAMING_SNAKE_CASE__ : str =copy.deepcopy(__lowercase )
if return_labels:
if model_class in get_values(__lowercase ):
SCREAMING_SNAKE_CASE__ : Any =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
return inputs_dict
def __magic_name__ ( self : List[Any] ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='''TimeSformer does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ) -> Optional[int]:
pass
def __magic_name__ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str =model_class(__lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Any =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) )
def __magic_name__ ( self : Any ) -> Any:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase )
SCREAMING_SNAKE_CASE__ : str =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : List[str] =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Dict =['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def __magic_name__ ( self : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__lowercase )
@slow
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> List[str]:
if not self.has_attentions:
pass
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] =True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.seq_length
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.num_frames
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
SCREAMING_SNAKE_CASE__ : str =False
SCREAMING_SNAKE_CASE__ : Tuple =True
SCREAMING_SNAKE_CASE__ : Dict =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE__ : List[Any] =True
SCREAMING_SNAKE_CASE__ : List[str] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE__ : Optional[int] =True
SCREAMING_SNAKE_CASE__ : Union[str, Any] =True
SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) )
self.assertEqual(out_len + 1 , len(__lowercase ) )
SCREAMING_SNAKE_CASE__ : List[str] =outputs.attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def __magic_name__ ( self : Tuple ) -> List[Any]:
def check_hidden_states_output(__lowercase : Tuple , __lowercase : Dict , __lowercase : Optional[int] ):
SCREAMING_SNAKE_CASE__ : List[Any] =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : int =model(**self._prepare_for_class(__lowercase , __lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__lowercase ) , __lowercase )
SCREAMING_SNAKE_CASE__ : int =self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Tuple =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : List[str] =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''', filename='''eating_spaghetti.npy''', repo_type='''dataset''' )
SCREAMING_SNAKE_CASE__ : Any =np.load(UpperCamelCase__ )
return list(UpperCamelCase__ )
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : Any ) -> List[str]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : int =TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to(
__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =self.default_image_processor
SCREAMING_SNAKE_CASE__ : Tuple =prepare_video()
SCREAMING_SNAKE_CASE__ : Any =image_processor(video[:8] , return_tensors='''pt''' ).to(__lowercase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] =model(**__lowercase )
# verify the logits
SCREAMING_SNAKE_CASE__ : List[str] =torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , __lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 ) )
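# A minimal inference sketch distilled from the integration test above (same checkpoint
# and processor settings; `video_frames` is assumed to be a list of HxWxC numpy frames):
def _timesformer_top1(video_frames ):
    processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
    model = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' )
    inputs = processor(video_frames[:8] , return_tensors='''pt''' )
    with torch.no_grad():
        logits = model(**inputs ).logits
    return model.config.id2label[int(logits.argmax(-1 ) )]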
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def _a( UpperCamelCase__ : List[str] ):
'''simple docstring'''
if isinstance(UpperCamelCase__, collections.abc.Iterable ):
return x
return (x, x)
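# e.g. _a(224) -> (224, 224), while _a((224, 224)) or _a([224, 224]) pass through
# unchanged: a plain int is not Iterable, so it takes the duplicating branch above.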
@require_tf
class __SCREAMING_SNAKE_CASE :
def __magic_name__ ( self : Tuple , __lowercase : Tuple , __lowercase : Tuple ) -> Optional[Any]:
pass
def __magic_name__ ( self : List[Any] ) -> Any:
pass
def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]:
pass
def __magic_name__ ( self : int , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Any , __lowercase : Dict , __lowercase : Optional[int]=None , **__lowercase : List[str] ) -> int:
SCREAMING_SNAKE_CASE__ : Tuple =VisionTextDualEncoderConfig.from_vision_text_configs(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =TFVisionTextDualEncoderModel(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def __magic_name__ ( self : int , __lowercase : str , __lowercase : int , __lowercase : int , __lowercase : int , __lowercase : str=None , **__lowercase : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =self.get_vision_text_model(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =TFVisionTextDualEncoderModel(vision_model=__lowercase , text_model=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __magic_name__ ( self : List[Any] , __lowercase : int , __lowercase : Any , __lowercase : int , __lowercase : Tuple , __lowercase : Union[str, Any]=None , **__lowercase : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.get_vision_text_model(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : List[str] ={'''vision_model''': vision_model, '''text_model''': text_model}
SCREAMING_SNAKE_CASE__ : Optional[Any] =TFVisionTextDualEncoderModel.from_vision_text_pretrained(**__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __magic_name__ ( self : str , __lowercase : Optional[int] , __lowercase : Union[str, Any] , __lowercase : Any , __lowercase : Any , __lowercase : str=None , **__lowercase : str ) -> str:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.get_vision_text_model(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Dict =TFVisionTextDualEncoderModel(vision_model=__lowercase , text_model=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase )
SCREAMING_SNAKE_CASE__ : Any =output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowercase )
SCREAMING_SNAKE_CASE__ : str =TFVisionTextDualEncoderModel.from_pretrained(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =after_output[0].numpy()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowercase , 1e-5 )
def __magic_name__ ( self : List[Any] , __lowercase : Optional[Any] , __lowercase : Optional[Any] , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : List[Any]=None , **__lowercase : Tuple ) -> int:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =self.get_vision_text_model(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TFVisionTextDualEncoderModel(vision_model=__lowercase , text_model=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =model(
input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase , output_attentions=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =output.vision_model_output.attentions
self.assertEqual(len(__lowercase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE__ : int =to_atuple(vision_model.config.image_size )
SCREAMING_SNAKE_CASE__ : Dict =to_atuple(vision_model.config.patch_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
SCREAMING_SNAKE_CASE__ : Union[str, Any] =num_patches + 1
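        # e.g. 224x224 inputs with 16x16 patches give (224 // 16) ** 2 = 196 patches,
        # hence seq_len = 196 + 1 = 197 once the [CLS] token is counted.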
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
SCREAMING_SNAKE_CASE__ : Optional[Any] =output.text_model_output.attentions
self.assertEqual(len(__lowercase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __magic_name__ ( self : int , __lowercase : np.ndarray , __lowercase : np.ndarray , __lowercase : float ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =np.abs((a - b) ).max()
self.assertLessEqual(__lowercase , __lowercase , F"Difference between torch and flax is {diff} (>= {tol})." )
def __magic_name__ ( self : str ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Tuple =self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**__lowercase )
def __magic_name__ ( self : Any ) -> str:
SCREAMING_SNAKE_CASE__ : Tuple =self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__lowercase )
def __magic_name__ ( self : List[str] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__lowercase )
def __magic_name__ ( self : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : List[str] =self.prepare_config_and_inputs()
self.check_save_load(**__lowercase )
def __magic_name__ ( self : int ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__lowercase )
@slow
def __magic_name__ ( self : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =self.get_pretrained_model_and_inputs()
SCREAMING_SNAKE_CASE__ : List[Any] =model_a(**__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__lowercase )
SCREAMING_SNAKE_CASE__ : str =TFVisionTextDualEncoderModel.from_pretrained(__lowercase )
SCREAMING_SNAKE_CASE__ : int =model_a(**__lowercase )
SCREAMING_SNAKE_CASE__ : str =after_outputs[0].numpy()
SCREAMING_SNAKE_CASE__ : Optional[int] =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowercase , 1e-5 )
@require_tf
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
def __magic_name__ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Tuple =TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-random-bert''' )
SCREAMING_SNAKE_CASE__ : List[str] =13
SCREAMING_SNAKE_CASE__ : Optional[Any] =floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
SCREAMING_SNAKE_CASE__ : List[Any] =random_attention_mask([batch_size, 4] )
SCREAMING_SNAKE_CASE__ : Dict ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def __magic_name__ ( self : Dict , __lowercase : str , __lowercase : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[int] =TFViTModel(__lowercase , name='''vision_model''' )
SCREAMING_SNAKE_CASE__ : Dict =TFBertModel(__lowercase , name='''text_model''' )
return vision_model, text_model
def __magic_name__ ( self : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE__ : int =TFViTModelTester(self )
SCREAMING_SNAKE_CASE__ : Any =TFBertModelTester(self )
SCREAMING_SNAKE_CASE__ : Optional[int] =vit_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : Any =bert_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =vision_config_and_inputs
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
def __magic_name__ ( self : List[str] ) -> int:
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
SCREAMING_SNAKE_CASE__ : List[str] =TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-deit-tf''' , '''hf-internal-testing/tiny-random-roberta''' )
SCREAMING_SNAKE_CASE__ : Any =13
SCREAMING_SNAKE_CASE__ : Tuple =floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
SCREAMING_SNAKE_CASE__ : List[Any] =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] =random_attention_mask([batch_size, 4] )
SCREAMING_SNAKE_CASE__ : Optional[int] ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def __magic_name__ ( self : List[str] , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : str=None , **__lowercase : Tuple ) -> Tuple:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =self.get_vision_text_model(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =TFVisionTextDualEncoderModel(vision_model=__lowercase , text_model=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =model(
input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase , output_attentions=__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =output.vision_model_output.attentions
self.assertEqual(len(__lowercase ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
SCREAMING_SNAKE_CASE__ : Union[str, Any] =to_atuple(vision_model.config.image_size )
SCREAMING_SNAKE_CASE__ : int =to_atuple(vision_model.config.patch_size )
SCREAMING_SNAKE_CASE__ : str =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
SCREAMING_SNAKE_CASE__ : Tuple =num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
SCREAMING_SNAKE_CASE__ : Any =output.text_model_output.attentions
self.assertEqual(len(__lowercase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __magic_name__ ( self : str , __lowercase : List[str] , __lowercase : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Dict =TFDeiTModel(__lowercase , name='''vision_model''' )
SCREAMING_SNAKE_CASE__ : List[Any] =TFRobertaModel(__lowercase , name='''text_model''' )
return vision_model, text_model
def __magic_name__ ( self : Dict ) -> Any:
SCREAMING_SNAKE_CASE__ : Optional[Any] =TFDeiTModelTester(self )
SCREAMING_SNAKE_CASE__ : str =TFRobertaModelTester(self )
SCREAMING_SNAKE_CASE__ : str =vit_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : Any =bert_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =vision_config_and_inputs
        (
            SCREAMING_SNAKE_CASE__ ,
            SCREAMING_SNAKE_CASE__ ,
            SCREAMING_SNAKE_CASE__ ,
            SCREAMING_SNAKE_CASE__ ,
            SCREAMING_SNAKE_CASE__ ,
            SCREAMING_SNAKE_CASE__ ,
            SCREAMING_SNAKE_CASE__ ,
        ) : Dict =text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
def __magic_name__ ( self : str ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-clip-tf''' , '''hf-internal-testing/tiny-random-bert''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =13
SCREAMING_SNAKE_CASE__ : Any =floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
SCREAMING_SNAKE_CASE__ : Dict =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
SCREAMING_SNAKE_CASE__ : Dict =random_attention_mask([batch_size, 4] )
SCREAMING_SNAKE_CASE__ : Optional[Any] ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def __magic_name__ ( self : int , __lowercase : Optional[int] , __lowercase : str ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =TFCLIPVisionModel(__lowercase , name='''vision_model''' )
SCREAMING_SNAKE_CASE__ : Any =TFBertModel(__lowercase , name='''text_model''' )
return vision_model, text_model
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =TFCLIPVisionModelTester(self )
SCREAMING_SNAKE_CASE__ : Optional[Any] =TFBertModelTester(self )
SCREAMING_SNAKE_CASE__ : List[str] =clip_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : List[str] =bert_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict =vision_config_and_inputs
        (
            SCREAMING_SNAKE_CASE__ ,
            SCREAMING_SNAKE_CASE__ ,
            SCREAMING_SNAKE_CASE__ ,
            SCREAMING_SNAKE_CASE__ ,
            SCREAMING_SNAKE_CASE__ ,
            SCREAMING_SNAKE_CASE__ ,
            SCREAMING_SNAKE_CASE__ ,
        ) : Any =text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def __magic_name__ ( self : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ : Optional[Any] =TFVisionTextDualEncoderModel.from_pretrained(
'''clip-italian/clip-italian''' , logit_scale_init_value=1.0 , from_pt=__lowercase )
SCREAMING_SNAKE_CASE__ : str =VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
SCREAMING_SNAKE_CASE__ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] =processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=__lowercase , padding=__lowercase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ : Dict =model(**__lowercase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , __lowercase , atol=1e-3 ) )
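# Hedged sketch (illustrative, not part of the original test file) of how CLIP-style
# dual encoders produce the logit shapes asserted above: logits_per_image has shape
# (n_images, n_texts) and logits_per_text is its transpose. Names are hypothetical.
import numpy as np

def sketch_clip_logits(image_embeds, text_embeds, logit_scale=1.0):
    # L2-normalize both embedding matrices, then take a scaled dot product
    image_embeds = image_embeds / np.linalg.norm(image_embeds, axis=-1, keepdims=True)
    text_embeds = text_embeds / np.linalg.norm(text_embeds, axis=-1, keepdims=True)
    logits_per_text = logit_scale * text_embeds @ image_embeds.T  # (n_texts, n_images)
    return logits_per_text.T, logits_per_text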
| 665 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
a_ = {
'bert-base-uncased': 5_1_2,
'bert-large-uncased': 5_1_2,
'bert-base-cased': 5_1_2,
'bert-large-cased': 5_1_2,
'bert-base-multilingual-uncased': 5_1_2,
'bert-base-multilingual-cased': 5_1_2,
'bert-base-chinese': 5_1_2,
'bert-base-german-cased': 5_1_2,
'bert-large-uncased-whole-word-masking': 5_1_2,
'bert-large-cased-whole-word-masking': 5_1_2,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-base-cased-finetuned-mrpc': 5_1_2,
'bert-base-german-dbmdz-cased': 5_1_2,
'bert-base-german-dbmdz-uncased': 5_1_2,
'TurkuNLP/bert-base-finnish-cased-v1': 5_1_2,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_1_2,
'wietsedv/bert-base-dutch-cased': 5_1_2,
}
a_ = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_INIT_CONFIGURATION
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = BertTokenizer
def __init__( self : int , __lowercase : Union[str, Any]=None , __lowercase : Tuple=None , __lowercase : str=True , __lowercase : Optional[Any]="[UNK]" , __lowercase : Tuple="[SEP]" , __lowercase : Any="[PAD]" , __lowercase : List[Any]="[CLS]" , __lowercase : Union[str, Any]="[MASK]" , __lowercase : Tuple=True , __lowercase : str=None , **__lowercase : Any , ) -> Optional[Any]:
super().__init__(
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , tokenize_chinese_chars=__lowercase , strip_accents=__lowercase , **__lowercase , )
        normalizer_state =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class =getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] =do_lower_case
            normalizer_state['''strip_accents'''] =strip_accents
            normalizer_state['''handle_chinese_chars'''] =tokenize_chinese_chars
            SCREAMING_SNAKE_CASE__ : Union[str, Any] =normalizer_class(**normalizer_state )
SCREAMING_SNAKE_CASE__ : Optional[int] =do_lower_case
    def __magic_name__ ( self : int , token_ids_a : Optional[Any] , token_ids_b : Union[str, Any]=None ) -> int:
        output =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def __magic_name__ ( self : Union[str, Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        sep =[self.sep_token_id]
        cls =[self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def __magic_name__ ( self : List[Any] , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self._tokenizer.model.save(__lowercase , name=__lowercase )
return tuple(__lowercase )
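# Hedged sketch of how the two helpers above compose for a sentence pair
# (self-contained, with hypothetical special-token ids cls=101, sep=102;
# not tied to any real vocabulary):
def sketch_bert_pair_layout(token_ids_a, token_ids_b=None, cls_id=101, sep_id=102):
    input_ids = [cls_id] + token_ids_a + [sep_id]
    token_type_ids = [0] * len(input_ids)
    if token_ids_b is not None:
        input_ids += token_ids_b + [sep_id]
        token_type_ids += [1] * (len(token_ids_b) + 1)
    return input_ids, token_type_ids

# sketch_bert_pair_layout([7, 8], [9]) -> ([101, 7, 8, 102, 9, 102], [0, 0, 0, 0, 1, 1])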
| 665 | 1 |
'''simple docstring'''
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
a_ = True
except ImportError:
a_ = False
try:
from torch.hub import _get_torch_home
a_ = _get_torch_home()
except ImportError:
a_ = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
a_ = os.path.join(torch_cache_home, 'transformers')
a_ = 'https://cdn.huggingface.co'
a_ = 'https://s3.amazonaws.com/models.huggingface.co/bert'
a_ = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
a_ = os.path.join(PATH, 'config.yaml')
a_ = os.path.join(PATH, 'attributes.txt')
a_ = os.path.join(PATH, 'objects.txt')
a_ = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
a_ = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
a_ = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
a_ = 'pytorch_model.bin'
a_ = 'config.yaml'
def _a( UpperCamelCase__ : Union[str, Any]=OBJECTS, UpperCamelCase__ : str=ATTRIBUTES ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str =[]
with open(UpperCamelCase__ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
SCREAMING_SNAKE_CASE__ : Optional[int] =[]
with open(UpperCamelCase__ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
def _a( UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =OrderedDict()
with open(UpperCamelCase__, '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] =pkl.load(UpperCamelCase__ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
SCREAMING_SNAKE_CASE__ : str =ckp.pop(UpperCamelCase__ )
if isinstance(UpperCamelCase__, np.ndarray ):
SCREAMING_SNAKE_CASE__ : Any =torch.tensor(UpperCamelCase__ )
else:
            assert isinstance(UpperCamelCase__, torch.Tensor ), type(UpperCamelCase__ )  # torch.Tensor is the class; torch.tensor is a factory function
SCREAMING_SNAKE_CASE__ : List[str] =v
return r
class __SCREAMING_SNAKE_CASE :
snake_case_ = {}
def __init__( self : Dict , __lowercase : dict , __lowercase : str = "root" , __lowercase : Any=0 ) -> int:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =name
SCREAMING_SNAKE_CASE__ : int =level
SCREAMING_SNAKE_CASE__ : Any ={}
for k, v in dictionary.items():
if v is None:
raise ValueError()
SCREAMING_SNAKE_CASE__ : int =copy.deepcopy(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =copy.deepcopy(__lowercase )
if isinstance(__lowercase , __lowercase ):
SCREAMING_SNAKE_CASE__ : List[Any] =Config(__lowercase , name=__lowercase , level=level + 1 )
SCREAMING_SNAKE_CASE__ : int =v
setattr(self , __lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =d
def __repr__( self : Optional[int] ) -> Dict:
return str(list((self._pointer.keys()) ) )
def __setattr__( self : int , __lowercase : Optional[Any] , __lowercase : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[str] =val
SCREAMING_SNAKE_CASE__ : Optional[Any] =val
SCREAMING_SNAKE_CASE__ : Optional[Any] =key.split('''.''' )
SCREAMING_SNAKE_CASE__ : str =len(__lowercase ) - 1
SCREAMING_SNAKE_CASE__ : Dict =self._pointer
if len(__lowercase ) > 1:
for i, l in enumerate(__lowercase ):
if hasattr(self , __lowercase ) and isinstance(getattr(self , __lowercase ) , __lowercase ):
setattr(getattr(self , __lowercase ) , '''.'''.join(levels[i:] ) , __lowercase )
if l == last_level:
SCREAMING_SNAKE_CASE__ : Optional[Any] =val
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =pointer[l]
def __magic_name__ ( self : Dict ) -> Optional[Any]:
return self._pointer
def __magic_name__ ( self : int , __lowercase : Tuple , __lowercase : str ) -> Union[str, Any]:
with open(F"{file_name}" , '''w''' ) as stream:
dump(__lowercase , __lowercase )
def __magic_name__ ( self : str , __lowercase : Dict , __lowercase : Any ) -> Tuple:
with open(F"{file_name}" , '''w''' ) as stream:
json.dump(__lowercase , __lowercase )
@staticmethod
def __magic_name__ ( __lowercase : Any ) -> Union[str, Any]:
with open(__lowercase ) as stream:
SCREAMING_SNAKE_CASE__ : str =load(__lowercase , Loader=__lowercase )
return data
def __str__( self : Any ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Tuple =''' '''
if self._name != "root":
SCREAMING_SNAKE_CASE__ : Tuple =F"{t * (self._level-1)}{self._name}:\n"
else:
SCREAMING_SNAKE_CASE__ : str =''''''
SCREAMING_SNAKE_CASE__ : str =self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(__lowercase , __lowercase ):
r += F"{t * (self._level)}{v}\n"
self._level += 1
else:
r += F"{t * (self._level)}{k}: {v} ({type(__lowercase ).__name__})\n"
SCREAMING_SNAKE_CASE__ : Dict =level
return r[:-1]
@classmethod
def __magic_name__ ( cls : Dict , __lowercase : str , **__lowercase : List[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =cls.get_config_dict(__lowercase , **__lowercase )
return cls(__lowercase )
@classmethod
def __magic_name__ ( cls : Union[str, Any] , __lowercase : str , **__lowercase : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =kwargs.pop('''cache_dir''' , __lowercase )
SCREAMING_SNAKE_CASE__ : Dict =kwargs.pop('''force_download''' , __lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =kwargs.pop('''resume_download''' , __lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =kwargs.pop('''proxies''' , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =kwargs.pop('''local_files_only''' , __lowercase )
if os.path.isdir(__lowercase ):
SCREAMING_SNAKE_CASE__ : List[Any] =os.path.join(__lowercase , __lowercase )
elif os.path.isfile(__lowercase ) or is_remote_url(__lowercase ):
SCREAMING_SNAKE_CASE__ : Dict =pretrained_model_name_or_path
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =hf_bucket_url(__lowercase , filename=__lowercase , use_cdn=__lowercase )
try:
# Load from URL or cache if already cached
SCREAMING_SNAKE_CASE__ : Optional[int] =cached_path(
__lowercase , cache_dir=__lowercase , force_download=__lowercase , proxies=__lowercase , resume_download=__lowercase , local_files_only=__lowercase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
SCREAMING_SNAKE_CASE__ : Union[str, Any] =Config.load_yaml(__lowercase )
except EnvironmentError:
SCREAMING_SNAKE_CASE__ : List[str] ='''Can\'t load config for'''
raise EnvironmentError(__lowercase )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(__lowercase ), kwargs
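# Self-contained sketch of the nested-dict-to-attribute pattern the Config class
# above implements (minimal illustrative version; it omits the dotted-key
# __setattr__ logic and YAML loading):
class SketchConfig:
    def __init__(self, dictionary: dict):
        for key, value in dictionary.items():
            setattr(self, key, SketchConfig(value) if isinstance(value, dict) else value)

# SketchConfig({'model': {'encoder': {'layers': 12}}}).model.encoder.layers -> 12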
def _a( UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str =torch.load('''dump.pt''', map_location=in_tensor.device )
SCREAMING_SNAKE_CASE__ : Optional[int] =in_tensor.numpy()
SCREAMING_SNAKE_CASE__ : Tuple =out_tensor.numpy()[0]
print(na.shape, na[0, 0, :5] )
print(na.shape, na[0, 0, :5] )
assert np.allclose(UpperCamelCase__, UpperCamelCase__, rtol=0.0_1, atol=0.1 ), (
f"{sum([1 for x in np.isclose(UpperCamelCase__, UpperCamelCase__, rtol=0.0_1, atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*1_0_0:.4f} %"
" element-wise mismatch"
)
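    # NOTE: raised intentionally so the comparison demo halts once the tensors match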
raise Exception('''tensors are all good''' )
# Hugging face functions below
def _a( UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any =urlparse(UpperCamelCase__ )
return parsed.scheme in ("http", "https")
def _a( UpperCamelCase__ : str, UpperCamelCase__ : str, UpperCamelCase__ : Dict=True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] =CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
SCREAMING_SNAKE_CASE__ : Any ='''/''' not in model_id
if legacy_format:
return f"{endpoint}/{model_id}-{filename}"
else:
return f"{endpoint}/{model_id}/{filename}"
def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : List[Any]=None, UpperCamelCase__ : Optional[int]=0, UpperCamelCase__ : Optional[int]=None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] ='''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(UpperCamelCase__, UpperCamelCase__ ):
ua += "; " + "; ".join('''{}/{}'''.format(UpperCamelCase__, UpperCamelCase__ ) for k, v in user_agent.items() )
elif isinstance(UpperCamelCase__, UpperCamelCase__ ):
ua += "; " + user_agent
SCREAMING_SNAKE_CASE__ : int ={'''user-agent''': ua}
if resume_size > 0:
SCREAMING_SNAKE_CASE__ : Tuple ='''bytes=%d-''' % (resume_size,)
SCREAMING_SNAKE_CASE__ : List[str] =requests.get(UpperCamelCase__, stream=UpperCamelCase__, proxies=UpperCamelCase__, headers=UpperCamelCase__ )
if response.status_code == 4_1_6: # Range not satisfiable
return
SCREAMING_SNAKE_CASE__ : List[str] =response.headers.get('''Content-Length''' )
SCREAMING_SNAKE_CASE__ : Optional[int] =resume_size + int(UpperCamelCase__ ) if content_length is not None else None
SCREAMING_SNAKE_CASE__ : Dict =tqdm(
unit='''B''', unit_scale=UpperCamelCase__, total=UpperCamelCase__, initial=UpperCamelCase__, desc='''Downloading''', )
for chunk in response.iter_content(chunk_size=1_0_2_4 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(UpperCamelCase__ ) )
temp_file.write(UpperCamelCase__ )
progress.close()
def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : List[str]=None, UpperCamelCase__ : Tuple=False, UpperCamelCase__ : Dict=None, UpperCamelCase__ : Any=1_0, UpperCamelCase__ : Union[str, Any]=False, UpperCamelCase__ : str=None, UpperCamelCase__ : List[str]=False, ):
'''simple docstring'''
if cache_dir is None:
SCREAMING_SNAKE_CASE__ : List[Any] =TRANSFORMERS_CACHE
if isinstance(UpperCamelCase__, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : List[Any] =str(UpperCamelCase__ )
os.makedirs(UpperCamelCase__, exist_ok=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] =None
if not local_files_only:
try:
SCREAMING_SNAKE_CASE__ : List[str] =requests.head(UpperCamelCase__, allow_redirects=UpperCamelCase__, proxies=UpperCamelCase__, timeout=UpperCamelCase__ )
if response.status_code == 2_0_0:
SCREAMING_SNAKE_CASE__ : Optional[int] =response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
SCREAMING_SNAKE_CASE__ : Tuple =url_to_filename(UpperCamelCase__, UpperCamelCase__ )
# get cache path to put the file
SCREAMING_SNAKE_CASE__ : List[str] =os.path.join(UpperCamelCase__, UpperCamelCase__ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(UpperCamelCase__ ):
return cache_path
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[
file
for file in fnmatch.filter(os.listdir(UpperCamelCase__ ), filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(UpperCamelCase__ ) > 0:
return os.path.join(UpperCamelCase__, matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(UpperCamelCase__ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
SCREAMING_SNAKE_CASE__ : Union[str, Any] =cache_path + '''.lock'''
with FileLock(UpperCamelCase__ ):
# If the download just completed while the lock was activated.
if os.path.exists(UpperCamelCase__ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
SCREAMING_SNAKE_CASE__ : str =cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(UpperCamelCase__, '''a+b''' ) as f:
yield f
SCREAMING_SNAKE_CASE__ : Optional[int] =_resumable_file_manager
if os.path.exists(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : List[str] =os.stat(UpperCamelCase__ ).st_size
else:
SCREAMING_SNAKE_CASE__ : Dict =0
else:
SCREAMING_SNAKE_CASE__ : Optional[int] =partial(tempfile.NamedTemporaryFile, dir=UpperCamelCase__, delete=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : str =0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''', UpperCamelCase__, temp_file.name, )
http_get(
UpperCamelCase__, UpperCamelCase__, proxies=UpperCamelCase__, resume_size=UpperCamelCase__, user_agent=UpperCamelCase__, )
os.replace(temp_file.name, UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any ={'''url''': url, '''etag''': etag}
SCREAMING_SNAKE_CASE__ : List[str] =cache_path + '''.json'''
with open(UpperCamelCase__, '''w''' ) as meta_file:
json.dump(UpperCamelCase__, UpperCamelCase__ )
return cache_path
def _a( UpperCamelCase__ : Any, UpperCamelCase__ : Optional[int]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str =url.encode('''utf-8''' )
SCREAMING_SNAKE_CASE__ : int =shaaaa(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] =url_hash.hexdigest()
if etag:
SCREAMING_SNAKE_CASE__ : int =etag.encode('''utf-8''' )
SCREAMING_SNAKE_CASE__ : str =shaaaa(UpperCamelCase__ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
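# Self-contained sketch of the cache-filename scheme implemented directly above:
# sha256(url), optionally suffixed with '.' + sha256(etag); names are illustrative.
from hashlib import sha256 as _sha256

def sketch_url_to_filename(url, etag=None):
    filename = _sha256(url.encode('utf-8')).hexdigest()
    if etag:
        filename += '.' + _sha256(etag.encode('utf-8')).hexdigest()
    if url.endswith('.h5'):
        filename += '.h5'
    return filename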
def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : List[str]=None, UpperCamelCase__ : str=False, UpperCamelCase__ : Union[str, Any]=None, UpperCamelCase__ : Any=False, UpperCamelCase__ : Tuple=None, UpperCamelCase__ : Union[str, Any]=False, UpperCamelCase__ : int=False, UpperCamelCase__ : List[Any]=False, ):
'''simple docstring'''
if cache_dir is None:
SCREAMING_SNAKE_CASE__ : List[Any] =TRANSFORMERS_CACHE
if isinstance(UpperCamelCase__, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : str =str(UpperCamelCase__ )
if isinstance(UpperCamelCase__, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : List[Any] =str(UpperCamelCase__ )
if is_remote_url(UpperCamelCase__ ):
# URL, so get it from the cache (downloading if necessary)
SCREAMING_SNAKE_CASE__ : Optional[int] =get_from_cache(
UpperCamelCase__, cache_dir=UpperCamelCase__, force_download=UpperCamelCase__, proxies=UpperCamelCase__, resume_download=UpperCamelCase__, user_agent=UpperCamelCase__, local_files_only=UpperCamelCase__, )
elif os.path.exists(UpperCamelCase__ ):
# File, and it exists.
SCREAMING_SNAKE_CASE__ : str =url_or_filename
elif urlparse(UpperCamelCase__ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(UpperCamelCase__ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(UpperCamelCase__ ) )
if extract_compressed_file:
if not is_zipfile(UpperCamelCase__ ) and not tarfile.is_tarfile(UpperCamelCase__ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =os.path.split(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =output_file.replace('''.''', '''-''' ) + '''-extracted'''
SCREAMING_SNAKE_CASE__ : int =os.path.join(UpperCamelCase__, UpperCamelCase__ )
if os.path.isdir(UpperCamelCase__ ) and os.listdir(UpperCamelCase__ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
SCREAMING_SNAKE_CASE__ : Union[str, Any] =output_path + '''.lock'''
with FileLock(UpperCamelCase__ ):
shutil.rmtree(UpperCamelCase__, ignore_errors=UpperCamelCase__ )
os.makedirs(UpperCamelCase__ )
if is_zipfile(UpperCamelCase__ ):
with ZipFile(UpperCamelCase__, '''r''' ) as zip_file:
zip_file.extractall(UpperCamelCase__ )
zip_file.close()
elif tarfile.is_tarfile(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Tuple =tarfile.open(UpperCamelCase__ )
tar_file.extractall(UpperCamelCase__ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(UpperCamelCase__ ) )
return output_path_extracted
return output_path
def _a( UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Optional[int]="," ):
'''simple docstring'''
assert isinstance(UpperCamelCase__, UpperCamelCase__ )
if os.path.isfile(UpperCamelCase__ ):
with open(UpperCamelCase__ ) as f:
SCREAMING_SNAKE_CASE__ : str =eval(f.read() )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] =requests.get(UpperCamelCase__ )
try:
        SCREAMING_SNAKE_CASE__ : int =req.json()  # parse the response object; the requests module itself has no .json()
except Exception:
SCREAMING_SNAKE_CASE__ : Any =req.content.decode()
assert data is not None, "could not connect"
try:
SCREAMING_SNAKE_CASE__ : Optional[Any] =eval(UpperCamelCase__ )
except Exception:
SCREAMING_SNAKE_CASE__ : Optional[int] =data.split('''\n''' )
req.close()
return data
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =requests.get(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =np.array(Image.open(BytesIO(response.content ) ) )
return img
def _a( UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(UpperCamelCase__ )
with open(UpperCamelCase__, '''rb''' ) as stream:
SCREAMING_SNAKE_CASE__ : int =pkl.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any =weights.pop('''model''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] ={}
for k, v in model.items():
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.from_numpy(UpperCamelCase__ )
if "running_var" in k:
SCREAMING_SNAKE_CASE__ : str =torch.tensor([0] )
SCREAMING_SNAKE_CASE__ : Optional[int] =k.replace('''running_var''', '''num_batches_tracked''' )
SCREAMING_SNAKE_CASE__ : List[Any] =zero
return new
def _a( ):
'''simple docstring'''
print(f"{os.path.abspath(os.path.join(UpperCamelCase__, os.pardir ) )}/demo.ipynb" )
def _a( UpperCamelCase__ : List[Any], UpperCamelCase__ : Dict="RGB" ):
'''simple docstring'''
assert isinstance(UpperCamelCase__, UpperCamelCase__ )
if os.path.isfile(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] =cva.imread(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE__ : Any =get_image_from_url(UpperCamelCase__ )
assert img is not None, f"could not connect to: {im}"
SCREAMING_SNAKE_CASE__ : Union[str, Any] =cva.cvtColor(UpperCamelCase__, cva.COLOR_BGR2RGB )
if input_format == "RGB":
SCREAMING_SNAKE_CASE__ : str =img[:, :, ::-1]
return img
def _a( UpperCamelCase__ : Tuple, UpperCamelCase__ : int=1 ):
'''simple docstring'''
return (images[i : i + batch] for i in range(0, len(UpperCamelCase__ ), UpperCamelCase__ ))
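# Hedged usage sketch for the batching generator defined last above; since `_a`
# is reused as a name throughout this file, a standalone equivalent is shown:
def sketch_chunks(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))

assert list(sketch_chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]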
| 665 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
a_ = False
a_ = False
def _a( UpperCamelCase__ : Namespace ):
'''simple docstring'''
return TrainCommand(UpperCamelCase__ )
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
@staticmethod
def __magic_name__ ( __lowercase : ArgumentParser ) -> Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' )
train_parser.add_argument(
'''--train_data''' , type=__lowercase , required=__lowercase , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=__lowercase , default=0 , help='''Column of the dataset csv file with example labels.''' )
train_parser.add_argument(
'''--column_text''' , type=__lowercase , default=1 , help='''Column of the dataset csv file with example texts.''' )
train_parser.add_argument(
'''--column_id''' , type=__lowercase , default=2 , help='''Column of the dataset csv file with example ids.''' )
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' )
train_parser.add_argument('''--validation_data''' , type=__lowercase , default='''''' , help='''path to validation dataset.''' )
train_parser.add_argument(
'''--validation_split''' , type=__lowercase , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
train_parser.add_argument('''--output''' , type=__lowercase , default='''./''' , help='''path to saved the trained model.''' )
train_parser.add_argument(
'''--task''' , type=__lowercase , default='''text_classification''' , help='''Task to train the model on.''' )
train_parser.add_argument(
'''--model''' , type=__lowercase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' )
train_parser.add_argument('''--train_batch_size''' , type=__lowercase , default=32 , help='''Batch size for training.''' )
train_parser.add_argument('''--valid_batch_size''' , type=__lowercase , default=64 , help='''Batch size for validation.''' )
train_parser.add_argument('''--learning_rate''' , type=__lowercase , default=3e-5 , help='''Learning rate.''' )
train_parser.add_argument('''--adam_epsilon''' , type=__lowercase , default=1e-08 , help='''Epsilon for Adam optimizer.''' )
train_parser.set_defaults(func=__lowercase )
def __init__( self : Tuple , __lowercase : Namespace ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Tuple =logging.get_logger('''transformers-cli/training''' )
SCREAMING_SNAKE_CASE__ : int ='''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output , exist_ok=__lowercase )
SCREAMING_SNAKE_CASE__ : Any =args.output
SCREAMING_SNAKE_CASE__ : str =args.column_label
SCREAMING_SNAKE_CASE__ : List[Any] =args.column_text
SCREAMING_SNAKE_CASE__ : Tuple =args.column_id
self.logger.info(F"Loading {args.task} pipeline for {args.model}" )
if args.task == "text_classification":
SCREAMING_SNAKE_CASE__ : List[str] =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"Loading dataset from {args.train_data}" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =None
if args.validation_data:
self.logger.info(F"Loading validation dataset from {args.validation_data}" )
SCREAMING_SNAKE_CASE__ : List[Any] =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =args.validation_split
SCREAMING_SNAKE_CASE__ : List[Any] =args.train_batch_size
SCREAMING_SNAKE_CASE__ : Any =args.valid_batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =args.learning_rate
SCREAMING_SNAKE_CASE__ : int =args.adam_epsilon
def __magic_name__ ( self : Any ) -> str:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __magic_name__ ( self : Optional[int] ) -> Tuple:
raise NotImplementedError
def __magic_name__ ( self : Dict ) -> List[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
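# Hedged sketch (illustrative, not from the original file): a minimal train file
# matching the default --column_label/--column_text/--column_id indices (0, 1, 2)
# and the tab-separated layout described in the --train_data help text above.
import csv

def sketch_write_train_file(path):
    rows = [('1', 'great movie', 'ex-0'), ('0', 'terrible plot', 'ex-1')]
    with open(path, 'w', newline='') as f:
        csv.writer(f, delimiter='\t').writerows(rows)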
| 665 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """mobilenet_v2"""
def __init__( self : Dict , __lowercase : int=3 , __lowercase : Optional[Any]=2_24 , __lowercase : Dict=1.0 , __lowercase : Optional[Any]=8 , __lowercase : Dict=8 , __lowercase : Tuple=6 , __lowercase : Dict=32 , __lowercase : Optional[Any]=True , __lowercase : Dict=True , __lowercase : Any="relu6" , __lowercase : Tuple=True , __lowercase : List[str]=0.8 , __lowercase : int=0.02 , __lowercase : Optional[Any]=0.001 , __lowercase : int=2_55 , **__lowercase : str , ) -> Dict:
super().__init__(**__lowercase )
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''' )
SCREAMING_SNAKE_CASE__ : List[str] =num_channels
SCREAMING_SNAKE_CASE__ : List[Any] =image_size
SCREAMING_SNAKE_CASE__ : int =depth_multiplier
SCREAMING_SNAKE_CASE__ : List[Any] =depth_divisible_by
SCREAMING_SNAKE_CASE__ : List[str] =min_depth
SCREAMING_SNAKE_CASE__ : Optional[int] =expand_ratio
SCREAMING_SNAKE_CASE__ : Optional[Any] =output_stride
SCREAMING_SNAKE_CASE__ : List[str] =first_layer_is_expansion
SCREAMING_SNAKE_CASE__ : str =finegrained_output
SCREAMING_SNAKE_CASE__ : int =hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] =tf_padding
SCREAMING_SNAKE_CASE__ : int =classifier_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] =initializer_range
SCREAMING_SNAKE_CASE__ : Dict =layer_norm_eps
SCREAMING_SNAKE_CASE__ : Optional[int] =semantic_loss_ignore_index
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = version.parse("""1.11""" )
@property
def __magic_name__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
def __magic_name__ ( self : Any ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
def __magic_name__ ( self : Any ) -> float:
return 1e-4
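# Hedged sketch: the standard MobileNet channel-rounding rule that
# depth_multiplier, depth_divisible_by and min_depth typically feed into.
# This helper is NOT defined in the config above; it is shown only for illustration.
def sketch_make_divisible(value, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:  # never reduce channels by more than 10%
        new_value += divisor
    return new_value

# e.g. sketch_make_divisible(32 * 1.4) -> 48 (channel count for depth_multiplier=1.4)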
| 665 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = KandinskyVaaImgaImgPipeline
snake_case_ = ["""image_embeds""", """negative_image_embeds""", """image"""]
snake_case_ = [
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
snake_case_ = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
snake_case_ = False
@property
def __magic_name__ ( self : List[str] ) -> Tuple:
return 32
@property
def __magic_name__ ( self : List[str] ) -> str:
return 32
@property
def __magic_name__ ( self : Any ) -> Optional[int]:
return self.time_input_dim
@property
def __magic_name__ ( self : List[Any] ) -> int:
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Tuple ) -> Optional[int]:
return 1_00
@property
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE__ : Optional[int] =UNetaDConditionModel(**__lowercase )
return model
@property
def __magic_name__ ( self : Dict ) -> Any:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __magic_name__ ( self : Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] =VQModel(**self.dummy_movq_kwargs )
return model
def __magic_name__ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[str] =self.dummy_unet
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_movq
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''num_train_timesteps''': 10_00,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE__ : str =DDIMScheduler(**__lowercase )
SCREAMING_SNAKE_CASE__ : Any ={
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __magic_name__ ( self : str , __lowercase : Optional[Any] , __lowercase : Any=0 ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowercase )
# create init_image
SCREAMING_SNAKE_CASE__ : Optional[Any] =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ : Any =Image.fromarray(np.uinta(__lowercase ) ).convert('''RGB''' ).resize((2_56, 2_56) )
if str(__lowercase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ : Dict =torch.manual_seed(__lowercase )
else:
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
SCREAMING_SNAKE_CASE__ : str ={
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __magic_name__ ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] ='''cpu'''
SCREAMING_SNAKE_CASE__ : Tuple =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =output.images
SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipe(
**self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : List[str] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : Tuple =np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : int ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : str =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE__ : List[Any] ='''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE__ : Optional[int] =KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =KandinskyVaaImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Dict =pipeline.to(__lowercase )
pipeline.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =pipe_prior(
__lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ : int =output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__lowercase , __lowercase )
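# Hedged sketch of what the imported assert_mean_pixel_difference typically checks
# (the real helper lives in ..test_pipelines_common and may differ in details):
import numpy as np

def sketch_assert_mean_pixel_difference(image, expected_image, expected_max_diff=10.0):
    image = np.asarray(image, dtype=np.float32)
    expected_image = np.asarray(expected_image, dtype=np.float32)
    avg_diff = np.abs(image - expected_image).mean()
    assert avg_diff < expected_max_diff, f'Images deviate by {avg_diff} pixels on average'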
| 665 | 1 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
a_ = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
a_ = {'facebook/blenderbot_small-90M': 5_1_2}
def _a( UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =set()
SCREAMING_SNAKE_CASE__ : Optional[Any] =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE__ : List[Any] =char
SCREAMING_SNAKE_CASE__ : Any =set(UpperCamelCase__ )
return pairs
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , __lowercase : List[str] , __lowercase : Dict , __lowercase : Optional[int]="__start__" , __lowercase : Union[str, Any]="__end__" , __lowercase : int="__unk__" , __lowercase : Union[str, Any]="__null__" , **__lowercase : Optional[Any] , ) -> int:
super().__init__(unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , pad_token=__lowercase , **__lowercase )
with open(__lowercase , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE__ : Optional[int] =json.load(__lowercase )
SCREAMING_SNAKE_CASE__ : str ={v: k for k, v in self.encoder.items()}
with open(__lowercase , encoding='''utf-8''' ) as merges_handle:
SCREAMING_SNAKE_CASE__ : Dict =merges_handle.read().split('''\n''' )[1:-1]
SCREAMING_SNAKE_CASE__ : Tuple =[tuple(merge.split() ) for merge in merges]
SCREAMING_SNAKE_CASE__ : Dict =dict(zip(__lowercase , range(len(__lowercase ) ) ) )
SCREAMING_SNAKE_CASE__ : str ={}
@property
def __magic_name__ ( self : Union[str, Any] ) -> int:
return len(self.encoder )
def __magic_name__ ( self : List[str] ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def __magic_name__ ( self : Optional[Any] , __lowercase : str ) -> str:
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE__ : Dict =re.sub('''([.,!?()])''' , r''' \1''' , __lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =re.sub('''(\')''' , r''' \1 ''' , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =re.sub(r'''\s{2,}''' , ''' ''' , __lowercase )
if "\n" in token:
SCREAMING_SNAKE_CASE__ : Any =token.replace('''\n''' , ''' __newln__''' )
SCREAMING_SNAKE_CASE__ : Dict =token.split(''' ''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] =[]
for token in tokens:
if not len(__lowercase ):
continue
SCREAMING_SNAKE_CASE__ : Optional[int] =token.lower()
SCREAMING_SNAKE_CASE__ : Optional[int] =tuple(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
SCREAMING_SNAKE_CASE__ : int =get_pairs(__lowercase )
if not pairs:
words.append(__lowercase )
continue
while True:
SCREAMING_SNAKE_CASE__ : str =min(__lowercase , key=lambda __lowercase : self.bpe_ranks.get(__lowercase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =bigram
SCREAMING_SNAKE_CASE__ : int =[]
SCREAMING_SNAKE_CASE__ : str =0
while i < len(__lowercase ):
try:
SCREAMING_SNAKE_CASE__ : List[Any] =word.index(__lowercase , __lowercase )
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE__ : List[str] =j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(__lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE__ : Any =tuple(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =new_word
if len(__lowercase ) == 1:
break
else:
SCREAMING_SNAKE_CASE__ : Dict =get_pairs(__lowercase )
SCREAMING_SNAKE_CASE__ : Dict ='''@@ '''.join(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =word[:-4]
SCREAMING_SNAKE_CASE__ : Any =word
words.append(__lowercase )
return " ".join(__lowercase )
def __magic_name__ ( self : Tuple , __lowercase : str ) -> List[str]:
SCREAMING_SNAKE_CASE__ : int =[]
SCREAMING_SNAKE_CASE__ : str =re.findall(r'''\S+\n?''' , __lowercase )
for token in words:
split_tokens.extend(list(self.bpe(__lowercase ).split(''' ''' ) ) )
return split_tokens
def __magic_name__ ( self : Optional[int] , __lowercase : str ) -> int:
SCREAMING_SNAKE_CASE__ : Any =token.lower()
return self.encoder.get(__lowercase , self.encoder.get(self.unk_token ) )
def __magic_name__ ( self : List[str] , __lowercase : int ) -> str:
return self.decoder.get(__lowercase , self.unk_token )
def __magic_name__ ( self : str , __lowercase : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =''' '''.join(__lowercase ).replace('''@@ ''' , '''''' ).strip()
return out_string
def __magic_name__ ( self : List[str] , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowercase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE__ : List[Any] =os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE__ : int =os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowercase , ensure_ascii=__lowercase ) + '''\n''' )
SCREAMING_SNAKE_CASE__ : Dict =0
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
        for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
''' Please check that the tokenizer is not corrupted!''' )
SCREAMING_SNAKE_CASE__ : Optional[int] =token_index
writer.write(''' '''.join(__lowercase ) + '''\n''' )
index += 1
return vocab_file, merge_file
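# Self-contained sketch of the symbol-pair extraction that the BPE loop above
# keys its merges on (mirrors the module-level helper; names are illustrative):
def sketch_get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

# sketch_get_pairs(('h', 'e', 'l', 'l', 'o</w>'))
# -> {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')}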
| 665 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
a_ = TypeVar('T')
class __SCREAMING_SNAKE_CASE ( Generic[T] ):
snake_case_ = 42 # Cache store of keys
snake_case_ = 42 # References of the keys in cache
snake_case_ = 10 # Maximum capacity of cache
def __init__( self : Dict , __lowercase : int ) -> None:
SCREAMING_SNAKE_CASE__ : Any =deque()
SCREAMING_SNAKE_CASE__ : str =set()
if not n:
SCREAMING_SNAKE_CASE__ : Optional[Any] =sys.maxsize
elif n < 0:
raise ValueError('''n should be an integer greater than 0.''' )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =n
def __magic_name__ ( self : List[str] , __lowercase : T ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
SCREAMING_SNAKE_CASE__ : int =self.dq_store.pop()
self.key_reference.remove(__lowercase )
else:
self.dq_store.remove(__lowercase )
self.dq_store.appendleft(__lowercase )
self.key_reference.add(__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> None:
for k in self.dq_store:
print(__lowercase )
def __repr__( self : List[Any] ) -> str:
return F"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['MaskFormerFeatureExtractor']
a_ = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
a_ = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 665 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
a_ = list[list[float | int]]
def _a( UpperCamelCase__ : Matrix, UpperCamelCase__ : Matrix ):
'''simple docstring'''
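    # Solve the linear system matrix * x = vector via Gaussian elimination
    # with partial pivoting, followed by back substitution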
SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(size + 1 )] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : float
for row in range(UpperCamelCase__ ):
for col in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Tuple =matrix[row][col]
SCREAMING_SNAKE_CASE__ : Optional[int] =vector[row][0]
SCREAMING_SNAKE_CASE__ : Any =0
SCREAMING_SNAKE_CASE__ : Union[str, Any] =0
while row < size and col < size:
# pivoting
SCREAMING_SNAKE_CASE__ : Any =max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCamelCase__, UpperCamelCase__ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[pivot_row], augmented[row]
for rowa in range(row + 1, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] =augmented[rowa][col] / augmented[row][col]
SCREAMING_SNAKE_CASE__ : Tuple =0
for cola in range(col + 1, size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1, UpperCamelCase__ ):
for row in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[row][col] / augmented[col][col]
for cola in range(UpperCamelCase__, size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row], 1_0 )] for row in range(UpperCamelCase__ )
]
def _a( UpperCamelCase__ : list[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(UpperCamelCase__ )] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Matrix =[[0] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Matrix
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
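    # Build the Vandermonde system: row x_val stores the powers
    # (x_val + 1) ** (size - col - 1) of the sample point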
for x_val, y_val in enumerate(UpperCamelCase__ ):
for col in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] =(x_val + 1) ** (size - col - 1)
SCREAMING_SNAKE_CASE__ : Dict =y_val
SCREAMING_SNAKE_CASE__ : Optional[int] =solve(UpperCamelCase__, UpperCamelCase__ )
def interpolated_func(UpperCamelCase__ : int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(UpperCamelCase__ ) )
return interpolated_func
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**1_0
)
def _a( UpperCamelCase__ : Callable[[int], int] = question_function, UpperCamelCase__ : int = 1_0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : list[int] =[func(UpperCamelCase__ ) for x_val in range(1, order + 1 )]
SCREAMING_SNAKE_CASE__ : list[Callable[[int], int]] =[
interpolate(data_points[:max_coeff] ) for max_coeff in range(1, order + 1 )
]
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : Callable[[int], int]
SCREAMING_SNAKE_CASE__ : int
for poly in polynomials:
SCREAMING_SNAKE_CASE__ : Any =1
while func(UpperCamelCase__ ) == poly(UpperCamelCase__ ):
x_val += 1
ret += poly(UpperCamelCase__ )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 665 | 1 |
'''simple docstring'''
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] =generate_pascal_triangle(UpperCamelCase__ )
for row_idx in range(UpperCamelCase__ ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=''' ''' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx], end=''' ''' )
else:
print(triangle[row_idx][col_idx], end='''''' )
print()
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
if not isinstance(UpperCamelCase__, UpperCamelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
SCREAMING_SNAKE_CASE__ : list[list[int]] =[]
for current_row_idx in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =populate_current_row(UpperCamelCase__, UpperCamelCase__ )
triangle.append(UpperCamelCase__ )
return triangle
def _a( UpperCamelCase__ : list[list[int]], UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =[-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =1, 1
for current_col_idx in range(1, UpperCamelCase__ ):
calculate_current_element(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
return current_row
def _a( UpperCamelCase__ : list[list[int]], UpperCamelCase__ : list[int], UpperCamelCase__ : int, UpperCamelCase__ : int, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict =triangle[current_row_idx - 1][current_col_idx - 1]
SCREAMING_SNAKE_CASE__ : str =triangle[current_row_idx - 1][current_col_idx]
SCREAMING_SNAKE_CASE__ : str =above_to_left_elt + above_to_right_elt
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
if not isinstance(UpperCamelCase__, UpperCamelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
SCREAMING_SNAKE_CASE__ : list[list[int]] =[[1]]
for row_index in range(1, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Tuple =[0] + result[-1] + [0]
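        # pad the previous row with a zero on each side so that every new
        # entry is simply the sum of the two entries above it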
SCREAMING_SNAKE_CASE__ : List[Any] =row_index + 1
# Calculate the number of distinct elements in a row
SCREAMING_SNAKE_CASE__ : int =sum(divmod(UpperCamelCase__, 2 ) )
SCREAMING_SNAKE_CASE__ : str =[
temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1 )
]
SCREAMING_SNAKE_CASE__ : int =row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
SCREAMING_SNAKE_CASE__ : Optional[int] =row_first_half + row_second_half
result.append(UpperCamelCase__ )
return result
def _a( ):
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(UpperCamelCase__ : Callable, UpperCamelCase__ : int ) -> None:
SCREAMING_SNAKE_CASE__ : List[Any] =f"{func.__name__}({value})"
SCREAMING_SNAKE_CASE__ : Optional[int] =timeit(f"__main__.{call}", setup='''import __main__''' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"{call:38} -- {timing:.4f} seconds" )
for value in range(1_5 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(UpperCamelCase__, UpperCamelCase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 665 |
'''simple docstring'''
def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =0
SCREAMING_SNAKE_CASE__ : Union[str, Any] =len(UpperCamelCase__ ) - 1
while left <= right:
        # avoid division by zero during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
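        # probe position, assuming the values are roughly uniformly
        # distributed between sorted_collection[left] and sorted_collection[right]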
SCREAMING_SNAKE_CASE__ : Union[str, Any] =left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase__ ):
return None
SCREAMING_SNAKE_CASE__ : Optional[int] =sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =left
SCREAMING_SNAKE_CASE__ : Optional[Any] =point
elif point > right:
SCREAMING_SNAKE_CASE__ : Optional[int] =right
SCREAMING_SNAKE_CASE__ : Tuple =point
else:
if item < current_item:
SCREAMING_SNAKE_CASE__ : str =point - 1
else:
SCREAMING_SNAKE_CASE__ : Tuple =point + 1
return None
def _a( UpperCamelCase__ : List[str], UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
SCREAMING_SNAKE_CASE__ : Dict =left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase__ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
elif point > right:
return interpolation_search_by_recursion(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, point - 1 )
else:
return interpolation_search_by_recursion(
UpperCamelCase__, UpperCamelCase__, point + 1, UpperCamelCase__ )
def _a( UpperCamelCase__ : Dict ):
'''simple docstring'''
if collection != sorted(UpperCamelCase__ ):
raise ValueError('''Collection must be ascending sorted''' )
return True
if __name__ == "__main__":
import sys
    debug = 0
    collection = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('Sequence must be ascending sorted to apply interpolation search')
    target = 6_7
    result = interpolation_search(collection, target)
if result is not None:
        print(F'''{target} found at position: {result}''')
else:
print('Not found')
| 665 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ = 1_6
a_ = 3_2
def _a( UpperCamelCase__ : Accelerator, UpperCamelCase__ : int = 1_6 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] =AutoTokenizer.from_pretrained('''bert-base-cased''' )
SCREAMING_SNAKE_CASE__ : Dict =load_dataset('''glue''', '''mrpc''' )
def tokenize_function(UpperCamelCase__ : Tuple ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ : str =tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=UpperCamelCase__, max_length=UpperCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE__ : List[str] =datasets.map(
UpperCamelCase__, batched=UpperCamelCase__, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE__ : int =tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(UpperCamelCase__ : List[str] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE__ : str =1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want the padded length to be a round multiple of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE__ : Any =1_6
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE__ : Tuple =8
else:
SCREAMING_SNAKE_CASE__ : str =None
return tokenizer.pad(
UpperCamelCase__, padding='''longest''', max_length=UpperCamelCase__, pad_to_multiple_of=UpperCamelCase__, return_tensors='''pt''', )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ : List[Any] =DataLoader(
tokenized_datasets['''train'''], shuffle=UpperCamelCase__, collate_fn=UpperCamelCase__, batch_size=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] =DataLoader(
tokenized_datasets['''validation'''], shuffle=UpperCamelCase__, collate_fn=UpperCamelCase__, batch_size=UpperCamelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a_ = mocked_dataloaders # noqa: F811
def _a( UpperCamelCase__ : List[Any], UpperCamelCase__ : List[Any] ):
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', UpperCamelCase__ ) == "1":
SCREAMING_SNAKE_CASE__ : Optional[int] =2
# New Code #
SCREAMING_SNAKE_CASE__ : int =int(args.gradient_accumulation_steps )
# Initialize accelerator
SCREAMING_SNAKE_CASE__ : Dict =Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=UpperCamelCase__ )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
'''Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE__ : Optional[int] =config['''lr''']
SCREAMING_SNAKE_CASE__ : Dict =int(config['''num_epochs'''] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =int(config['''seed'''] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =int(config['''batch_size'''] )
SCREAMING_SNAKE_CASE__ : Optional[int] =evaluate.load('''glue''', '''mrpc''' )
set_seed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] =get_dataloaders(UpperCamelCase__, UpperCamelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE__ : List[Any] =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=UpperCamelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE__ : str =model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE__ : Dict =AdamW(params=model.parameters(), lr=UpperCamelCase__ )
# Instantiate scheduler
SCREAMING_SNAKE_CASE__ : Optional[Any] =get_linear_schedule_with_warmup(
optimizer=UpperCamelCase__, num_warmup_steps=1_0_0, num_training_steps=(len(UpperCamelCase__ ) * num_epochs), )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =accelerator.prepare(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
# Now we train the model
for epoch in range(UpperCamelCase__ ):
model.train()
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
            # We also do not currently support TPUs, nor do we advise using them, as bugs
            # were found on the XLA side when running our tests.
with accelerator.accumulate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =output.loss
accelerator.backward(UpperCamelCase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
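                # Since the optimizer and scheduler were prepared by Accelerate, the
                # step()/zero_grad() calls above only take effect on batches where
                # gradients are actually synchronized (every gradient_accumulation_steps batches)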
model.eval()
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple =model(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : str =outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=UpperCamelCase__, references=UpperCamelCase__, )
SCREAMING_SNAKE_CASE__ : List[str] =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", UpperCamelCase__ )
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] =argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''', type=UpperCamelCase__, default=UpperCamelCase__, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''', )
# New Code #
parser.add_argument(
        '''--gradient_accumulation_steps''', type=UpperCamelCase__, default=1, help='''The number of minibatches to be run before gradients are accumulated.''', )
parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''' )
SCREAMING_SNAKE_CASE__ : List[Any] =parser.parse_args()
SCREAMING_SNAKE_CASE__ : Dict ={'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6}
training_function(UpperCamelCase__, UpperCamelCase__ )
if __name__ == "__main__":
main()
| 665 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def __magic_name__ ( *__lowercase : int , **__lowercase : Optional[Any] ) -> Optional[Any]:
pass
@is_pipeline_test
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@require_torch
def __magic_name__ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
SCREAMING_SNAKE_CASE__ : List[str] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] )
        # The floating-point scores are so close that we run into floating-point error, and the order is
        # not guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(__lowercase ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
SCREAMING_SNAKE_CASE__ : int =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
] , )
@require_tf
def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
SCREAMING_SNAKE_CASE__ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Tuple =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
] , )
@slow
@require_torch
def __magic_name__ ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE__ : Any =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : List[str] =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def __magic_name__ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ : str =pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE__ : Optional[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
| 665 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
a_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
def __init__( self : str , *__lowercase : int , **__lowercase : List[Any] ) -> None:
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' , __lowercase , )
super().__init__(*__lowercase , **__lowercase )
| 665 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case_ = JukeboxTokenizer
snake_case_ = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
def __magic_name__ ( self : Optional[int] ) -> str:
import torch
SCREAMING_SNAKE_CASE__ : List[str] =JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
SCREAMING_SNAKE_CASE__ : str =tokenizer(**self.metas )['''input_ids''']
# fmt: off
SCREAMING_SNAKE_CASE__ : str =[
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def __magic_name__ ( self : Any ) -> List[str]:
import torch
SCREAMING_SNAKE_CASE__ : int =JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer(**self.metas )['''input_ids''']
# fmt: off
SCREAMING_SNAKE_CASE__ : Optional[int] =[
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 665 | 1 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class __SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Tuple , __lowercase : List[str] , __lowercase : Union[str, Any] , __lowercase : Dict=1 , __lowercase : List[Any]=False , **__lowercase : str ) -> Optional[Any]:
super().__init__(**__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =vocab_size
SCREAMING_SNAKE_CASE__ : Dict =d_embed
SCREAMING_SNAKE_CASE__ : Dict =d_proj
SCREAMING_SNAKE_CASE__ : Optional[Any] =cutoffs + [vocab_size]
SCREAMING_SNAKE_CASE__ : List[str] =[0] + self.cutoffs
SCREAMING_SNAKE_CASE__ : str =div_val
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.cutoffs[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =len(self.cutoffs ) - 1
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.shortlist_size + self.n_clusters
SCREAMING_SNAKE_CASE__ : Optional[int] =keep_order
SCREAMING_SNAKE_CASE__ : Tuple =[]
SCREAMING_SNAKE_CASE__ : Optional[Any] =[]
def __magic_name__ ( self : Any , __lowercase : int ) -> Tuple:
if self.n_clusters > 0:
SCREAMING_SNAKE_CASE__ : Any =self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=__lowercase , name='''cluster_weight''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.add_weight(
shape=(self.n_clusters,) , initializer='''zeros''' , trainable=__lowercase , name='''cluster_bias''' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
SCREAMING_SNAKE_CASE__ : str =self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=__lowercase , name=F"out_projs_._{i}" , )
self.out_projs.append(__lowercase )
else:
self.out_projs.append(__lowercase )
SCREAMING_SNAKE_CASE__ : int =self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=__lowercase , name=F"out_layers_._{i}_._weight" , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.add_weight(
shape=(self.vocab_size,) , initializer='''zeros''' , trainable=__lowercase , name=F"out_layers_._{i}_._bias" , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =self.cutoff_ends[i], self.cutoff_ends[i + 1]
SCREAMING_SNAKE_CASE__ : str =self.d_embed // (self.div_val**i)
SCREAMING_SNAKE_CASE__ : Any =self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=__lowercase , name=F"out_projs_._{i}" )
self.out_projs.append(__lowercase )
SCREAMING_SNAKE_CASE__ : int =self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=__lowercase , name=F"out_layers_._{i}_._weight" , )
SCREAMING_SNAKE_CASE__ : Dict =self.add_weight(
shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=__lowercase , name=F"out_layers_._{i}_._bias" , )
self.out_layers.append((weight, bias) )
super().build(__lowercase )
@staticmethod
def __magic_name__ ( __lowercase : List[str] , __lowercase : Tuple , __lowercase : Any , __lowercase : str=None ) -> Optional[Any]:
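        # optionally project the hidden states into the embedding space, then
        # compute logits against the output embedding matrix plus bias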
SCREAMING_SNAKE_CASE__ : Union[str, Any] =x
if proj is not None:
SCREAMING_SNAKE_CASE__ : List[Any] =tf.einsum('''ibd,ed->ibe''' , __lowercase , __lowercase )
return tf.einsum('''ibd,nd->ibn''' , __lowercase , __lowercase ) + b
@staticmethod
def __magic_name__ ( __lowercase : List[Any] , __lowercase : List[Any] ) -> List[Any]:
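        # select, for each position, the log-probability that was assigned
        # to its target token id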
SCREAMING_SNAKE_CASE__ : List[str] =shape_list(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tf.range(lp_size[0] , dtype=target.dtype )
SCREAMING_SNAKE_CASE__ : Optional[Any] =tf.stack([r, target] , 1 )
return tf.gather_nd(__lowercase , __lowercase )
def __magic_name__ ( self : Optional[int] , __lowercase : int , __lowercase : Union[str, Any] , __lowercase : str=True , __lowercase : int=False ) -> Dict:
SCREAMING_SNAKE_CASE__ : List[str] =0
if self.n_clusters == 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] =self._logit(__lowercase , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] =tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__lowercase , logits=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =tf.nn.log_softmax(__lowercase , axis=-1 )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =shape_list(__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =[]
SCREAMING_SNAKE_CASE__ : Any =tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
SCREAMING_SNAKE_CASE__ : List[Any] =(target >= l_idx) & (target < r_idx)
SCREAMING_SNAKE_CASE__ : List[Any] =tf.where(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =tf.boolean_mask(__lowercase , __lowercase ) - l_idx
if self.div_val == 1:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.out_layers[0][0][l_idx:r_idx]
SCREAMING_SNAKE_CASE__ : Dict =self.out_layers[0][1][l_idx:r_idx]
else:
SCREAMING_SNAKE_CASE__ : Dict =self.out_layers[i][0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.out_layers[i][1]
if i == 0:
SCREAMING_SNAKE_CASE__ : Any =tf.concat([cur_W, self.cluster_weight] , 0 )
SCREAMING_SNAKE_CASE__ : int =tf.concat([cur_b, self.cluster_bias] , 0 )
SCREAMING_SNAKE_CASE__ : int =self._logit(__lowercase , __lowercase , __lowercase , self.out_projs[0] )
SCREAMING_SNAKE_CASE__ : Optional[int] =tf.nn.log_softmax(__lowercase )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
SCREAMING_SNAKE_CASE__ : int =tf.boolean_mask(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self._gather_logprob(__lowercase , __lowercase )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] =self._logit(__lowercase , __lowercase , __lowercase , self.out_projs[i] )
SCREAMING_SNAKE_CASE__ : int =tf.nn.log_softmax(__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =self.cutoffs[0] + i - 1 # No probability for the head cluster
SCREAMING_SNAKE_CASE__ : Optional[int] =head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(__lowercase )
if target is not None:
SCREAMING_SNAKE_CASE__ : int =tf.boolean_mask(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : int =tf.boolean_mask(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Any =self._gather_logprob(__lowercase , __lowercase )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(__lowercase , -cur_logprob , shape_list(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] =tf.concat(__lowercase , axis=-1 )
if target is not None:
if return_mean:
SCREAMING_SNAKE_CASE__ : Dict =tf.reduce_mean(__lowercase )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(__lowercase )
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(__lowercase , name=self.name , aggregation='''mean''' if return_mean else '''''' )
return out
| 665 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """gpt_neox"""
def __init__( self : List[Any] , __lowercase : Union[str, Any]=5_04_32 , __lowercase : int=61_44 , __lowercase : Tuple=44 , __lowercase : List[str]=64 , __lowercase : str=2_45_76 , __lowercase : Dict="gelu" , __lowercase : Tuple=0.25 , __lowercase : Tuple=1_00_00 , __lowercase : Tuple=0.0 , __lowercase : str=0.0 , __lowercase : List[Any]=0.1 , __lowercase : Dict=20_48 , __lowercase : Any=0.02 , __lowercase : Dict=1e-5 , __lowercase : List[Any]=True , __lowercase : str=0 , __lowercase : Optional[Any]=2 , __lowercase : Tuple=False , __lowercase : List[Any]=True , __lowercase : Optional[Any]=None , **__lowercase : Any , ) -> Dict:
super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =max_position_embeddings
SCREAMING_SNAKE_CASE__ : Any =hidden_size
SCREAMING_SNAKE_CASE__ : str =num_hidden_layers
SCREAMING_SNAKE_CASE__ : Any =num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] =intermediate_size
SCREAMING_SNAKE_CASE__ : Dict =hidden_act
SCREAMING_SNAKE_CASE__ : str =rotary_pct
SCREAMING_SNAKE_CASE__ : Optional[Any] =rotary_emb_base
SCREAMING_SNAKE_CASE__ : List[Any] =attention_dropout
SCREAMING_SNAKE_CASE__ : List[Any] =hidden_dropout
SCREAMING_SNAKE_CASE__ : str =classifier_dropout
SCREAMING_SNAKE_CASE__ : Any =initializer_range
SCREAMING_SNAKE_CASE__ : Dict =layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any =use_cache
SCREAMING_SNAKE_CASE__ : Tuple =tie_word_embeddings
SCREAMING_SNAKE_CASE__ : Tuple =use_parallel_residual
SCREAMING_SNAKE_CASE__ : Union[str, Any] =rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
                '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
def __magic_name__ ( self : Optional[Any] ) -> Optional[Any]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __lowercase ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                F"got {self.rope_scaling}" )
SCREAMING_SNAKE_CASE__ : int =self.rope_scaling.get('''type''' , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =self.rope_scaling.get('''factor''' , __lowercase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(__lowercase , __lowercase ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
| 665 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """vivit"""
def __init__( self : Union[str, Any] , __lowercase : Dict=2_24 , __lowercase : List[str]=32 , __lowercase : Optional[Any]=[2, 16, 16] , __lowercase : str=3 , __lowercase : int=7_68 , __lowercase : List[str]=12 , __lowercase : str=12 , __lowercase : List[str]=30_72 , __lowercase : str="gelu_fast" , __lowercase : Tuple=0.0 , __lowercase : List[str]=0.0 , __lowercase : int=0.02 , __lowercase : int=1e-06 , __lowercase : Optional[int]=True , **__lowercase : Optional[int] , ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =hidden_size
SCREAMING_SNAKE_CASE__ : Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] =num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] =intermediate_size
SCREAMING_SNAKE_CASE__ : List[Any] =hidden_act
SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] =initializer_range
SCREAMING_SNAKE_CASE__ : Tuple =layer_norm_eps
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =num_frames
SCREAMING_SNAKE_CASE__ : List[str] =tubelet_size
SCREAMING_SNAKE_CASE__ : str =num_channels
SCREAMING_SNAKE_CASE__ : Tuple =qkv_bias
super().__init__(**__lowercase )
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def _a( UpperCamelCase__ : float, UpperCamelCase__ : int ):
'''simple docstring'''
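    # returns u * (u - 1) * ... * (u - p + 1), the falling-factorial
    # coefficient used by Newton's forward-difference interpolation formula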
SCREAMING_SNAKE_CASE__ : Any =u
for i in range(1, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] =temp * (u - i)
return temp
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any =int(input('''enter the numbers of values: ''' ) )
SCREAMING_SNAKE_CASE__ : list[list[float]] =[]
for _ in range(UpperCamelCase__ ):
y.append([] )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
y[i].append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : int =0
print('''enter the values of parameters in a list: ''' )
SCREAMING_SNAKE_CASE__ : int =list(map(UpperCamelCase__, input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Any =float(input() )
SCREAMING_SNAKE_CASE__ : int =int(input('''enter the value to interpolate: ''' ) )
SCREAMING_SNAKE_CASE__ : Any =(value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1, UpperCamelCase__ ):
for j in range(n - i ):
SCREAMING_SNAKE_CASE__ : List[str] =y[j + 1][i - 1] - y[j][i - 1]
SCREAMING_SNAKE_CASE__ : Tuple =y[0][0]
for i in range(1, UpperCamelCase__ ):
summ += (ucal(UpperCamelCase__, UpperCamelCase__ ) * y[0][i]) / math.factorial(UpperCamelCase__ )
print(f"the value at {value} is {summ}" )
if __name__ == "__main__":
main()
| 665 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _a( UpperCamelCase__ : NDArray[floataa], UpperCamelCase__ : NDArray[floataa], UpperCamelCase__ : list[int], UpperCamelCase__ : int, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =coefficient_matrix.shape
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =constant_matrix.shape
if rowsa != colsa:
SCREAMING_SNAKE_CASE__ : Any =f"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"
raise ValueError(UpperCamelCase__ )
if colsa != 1:
SCREAMING_SNAKE_CASE__ : str =f"Constant matrix must be nx1 but received {rowsa}x{colsa}"
raise ValueError(UpperCamelCase__ )
if rowsa != rowsa:
SCREAMING_SNAKE_CASE__ : str =(
'''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
f"received {rowsa}x{colsa} and {rowsa}x{colsa}"
)
raise ValueError(UpperCamelCase__ )
if len(UpperCamelCase__ ) != rowsa:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(
'''Number of initial values must be equal to number of rows in coefficient '''
f"matrix but received {len(UpperCamelCase__ )} and {rowsa}"
)
raise ValueError(UpperCamelCase__ )
if iterations <= 0:
raise ValueError('''Iterations must be at least 1''' )
SCREAMING_SNAKE_CASE__ : NDArray[floataa] =np.concatenate(
(coefficient_matrix, constant_matrix), axis=1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =table.shape
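    # The Jacobi method is only guaranteed to converge for strictly diagonally
    # dominant coefficient matrices, so validate that before iterating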
strictly_diagonally_dominant(UpperCamelCase__ )
# Iterates the whole matrix for given number of times
for _ in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : List[str] =[]
for row in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =0
for col in range(UpperCamelCase__ ):
if col == row:
SCREAMING_SNAKE_CASE__ : int =table[row][col]
elif col == cols - 1:
SCREAMING_SNAKE_CASE__ : Any =table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
SCREAMING_SNAKE_CASE__ : int =(temp + val) / denom
new_val.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =new_val
return [float(UpperCamelCase__ ) for i in new_val]
def _a( UpperCamelCase__ : NDArray[floataa] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =table.shape
SCREAMING_SNAKE_CASE__ : Any =True
for i in range(0, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : int =0
for j in range(0, cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''', [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
], )
def _a( UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str =tmp_path_factory.mktemp('''dset_infos_dir''' )
if "full:README.md" in files:
with open(dataset_infos_dir / '''README.md''', '''w''' ) as f:
f.write('''---\ndataset_info:\n dataset_size: 42\n---''' )
if "empty:README.md" in files:
with open(dataset_infos_dir / '''README.md''', '''w''' ) as f:
f.write('''''' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / '''dataset_infos.json''', '''w''' ) as f:
f.write('''{"default": {"dataset_size": 42}}''' )
SCREAMING_SNAKE_CASE__ : Optional[int] =DatasetInfosDict.from_directory(UpperCamelCase__ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 4_2
@pytest.mark.parametrize(
'''dataset_info''', [
DatasetInfo(),
DatasetInfo(
description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=4_2, ),
], )
def _a( UpperCamelCase__ : Dict, UpperCamelCase__ : DatasetInfo ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =str(UpperCamelCase__ )
dataset_info.write_to_directory(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any =DatasetInfo.from_directory(UpperCamelCase__ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(UpperCamelCase__, '''dataset_info.json''' ) )
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any =DatasetInfo(
description='''foo''', citation='''bar''', homepage='''https://foo.bar''', license='''CC0''', features=Features({'''a''': Value('''int32''' )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train''', '''num_examples''': 4_2}], download_checksums={}, download_size=1_3_3_7, post_processing_size=4_4_2, dataset_size=1_2_3_4, size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4, )
SCREAMING_SNAKE_CASE__ : int =dataset_info._to_yaml_dict()
assert sorted(UpperCamelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str) )
SCREAMING_SNAKE_CASE__ : List[Any] =yaml.safe_dump(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =yaml.safe_load(UpperCamelCase__ )
assert dataset_info_yaml_dict == reloaded
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =DatasetInfo()
SCREAMING_SNAKE_CASE__ : Optional[Any] =dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''', [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=4_2, )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=4_2 ),
'''v2''': DatasetInfo(dataset_size=1_3_3_7 ),
} ),
], )
def _a( UpperCamelCase__ : int, UpperCamelCase__ : DatasetInfosDict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any =str(UpperCamelCase__ )
dataset_infos_dict.write_to_directory(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] =DatasetInfosDict.from_directory(UpperCamelCase__ )
    # the dict key (config_name) overrides the config_name attribute of each dataset_info
for config_name, dataset_info in dataset_infos_dict.items():
SCREAMING_SNAKE_CASE__ : int =config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
SCREAMING_SNAKE_CASE__ : Dict =DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(UpperCamelCase__, '''README.md''' ) )
| 665 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _a( UpperCamelCase__ : str, UpperCamelCase__ : Tuple, UpperCamelCase__ : Any, UpperCamelCase__ : List[str], UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
with open(UpperCamelCase__ ) as metadata_file:
SCREAMING_SNAKE_CASE__ : Optional[int] =json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =LukeConfig(use_entity_aware_attention=UpperCamelCase__, **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.load(UpperCamelCase__, map_location='''cpu''' )['''module''']
# Load the entity vocab file
SCREAMING_SNAKE_CASE__ : List[str] =load_original_entity_vocab(UpperCamelCase__ )
# add an entry for [MASK2]
SCREAMING_SNAKE_CASE__ : Optional[int] =max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
SCREAMING_SNAKE_CASE__ : Optional[int] =XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE__ : List[Any] =AddedToken('''<ent>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any =AddedToken('''<ent2>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''r''' ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] =json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : int ='''MLukeTokenizer'''
with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''w''' ) as f:
json.dump(UpperCamelCase__, UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__, MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ), '''w''' ) as f:
json.dump(UpperCamelCase__, UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =MLukeTokenizer.from_pretrained(UpperCamelCase__ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE__ : str =tokenizer.convert_tokens_to_ids(['''@'''] )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.convert_tokens_to_ids(['''#'''] )[0]
SCREAMING_SNAKE_CASE__ : Dict =state_dict['''embeddings.word_embeddings.weight''']
SCREAMING_SNAKE_CASE__ : List[str] =word_emb[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Tuple =word_emb[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : str =torch.cat([word_emb, ent_emb, enta_emb] )
    # add rows for the new special tokens to the word-level decoder biases ('lm_head')
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[bias_name]
SCREAMING_SNAKE_CASE__ : List[Any] =decoder_bias[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : str =decoder_bias[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : List[str] =torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE__ : Tuple =f"encoder.layer.{layer_index}.attention.self."
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : List[Any] =state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE__ : Any =state_dict['''entity_embeddings.entity_embeddings.weight''']
SCREAMING_SNAKE_CASE__ : Any =entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict['''entity_predictions.bias''']
SCREAMING_SNAKE_CASE__ : Tuple =entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_prediction_bias, entity_mask_bias] )
SCREAMING_SNAKE_CASE__ : int =LukeForMaskedLM(config=UpperCamelCase__ ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
SCREAMING_SNAKE_CASE__ : Tuple =OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[key]
else:
SCREAMING_SNAKE_CASE__ : Any =state_dict[key]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =model.load_state_dict(UpperCamelCase__, strict=UpperCamelCase__ )
if set(UpperCamelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" )
if set(UpperCamelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
SCREAMING_SNAKE_CASE__ : Any =MLukeTokenizer.from_pretrained(UpperCamelCase__, task='''entity_classification''' )
SCREAMING_SNAKE_CASE__ : str ='''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(0, 9)
SCREAMING_SNAKE_CASE__ : str =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : List[Any] =model(**UpperCamelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE__ : str =torch.Size((1, 3_3, 7_6_8) )
SCREAMING_SNAKE_CASE__ : int =torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE__ : Any =torch.Size((1, 1, 7_6_8) )
SCREAMING_SNAKE_CASE__ : int =torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
SCREAMING_SNAKE_CASE__ : str =MLukeTokenizer.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''Tokyo is the capital of <mask>.'''
SCREAMING_SNAKE_CASE__ : Dict =(2_4, 3_0)
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : List[str] =model(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] =encoding['''input_ids'''][0].tolist()
SCREAMING_SNAKE_CASE__ : Any =input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
SCREAMING_SNAKE_CASE__ : List[Any] =outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.entity_logits[0][0].argmax().item()
SCREAMING_SNAKE_CASE__ : Dict =[
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
    # Finally, save our PyTorch model (the tokenizer was saved above)
print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) )
model.save_pretrained(UpperCamelCase__ )
def _a( UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] =['''[MASK]''', '''[PAD]''', '''[UNK]''']
SCREAMING_SNAKE_CASE__ : List[str] =[json.loads(UpperCamelCase__ ) for line in open(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Optional[int] ={}
for entry in data:
SCREAMING_SNAKE_CASE__ : Tuple =entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
SCREAMING_SNAKE_CASE__ : str =entity_id
break
SCREAMING_SNAKE_CASE__ : Union[str, Any] =f"{language}:{entity_name}"
SCREAMING_SNAKE_CASE__ : Union[str, Any] =entity_id
return new_mapping
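# Each line of the original entity vocab file is a JSON object; an illustrative
# (made-up) example of the shape consumed above:
#   {"id": 3, "entities": [["Tokyo", "en"], ["東京", "ja"]]}
# Special tokens such as [MASK] keep their bare name; every other alias is
# stored under a "lang:name" key pointing at the shared entity id.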
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
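    # Example invocation (a sketch; the script name and paths are placeholders):
    #
    #   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
    #       --checkpoint_path /path/to/pytorch_model.bin \
    #       --metadata_path /path/to/metadata.json \
    #       --entity_vocab_path /path/to/entity_vocab.jsonl \
    #       --pytorch_dump_folder_path ./mluke-base \
    #       --model_size base
    #
    # Despite the .tsv wording in the argparse help, the loader above parses
    # the entity vocab as JSON lines.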
| 665 | 1 |
'''simple docstring'''
from collections import defaultdict
def _a( UpperCamelCase__ : str, UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =first_str.lower().strip()
SCREAMING_SNAKE_CASE__ : Optional[Any] =second_str.lower().strip()
# Remove whitespace
SCREAMING_SNAKE_CASE__ : Tuple =first_str.replace(''' ''', '''''' )
SCREAMING_SNAKE_CASE__ : List[str] =second_str.replace(''' ''', '''''' )
# Strings of different lengths are not anagrams
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
return False
# Default values for count should be 0
SCREAMING_SNAKE_CASE__ : defaultdict[str, int] =defaultdict(UpperCamelCase__ )
    # For each character position, increment the count for the character from
    # the first string and decrement it for the character from the second
for i in range(len(UpperCamelCase__ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
a_ = input('Enter the first string ').strip()
a_ = input('Enter the second string ').strip()
a_ = check_anagrams(input_a, input_b)
print(F'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
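# Self-contained sketch of the same counting idea (the definitions above are
# anonymized, so this re-implements the check locally with assumed names):
from collections import Counter

def _anagram_sketch(first: str, second: str) -> bool:
    first = first.lower().strip().replace(" ", "")
    second = second.lower().strip().replace(" ", "")
    return len(first) == len(second) and Counter(first) == Counter(second)

print(_anagram_sketch("Silent", "Listen"))  # True
print(_anagram_sketch("rat", "car"))  # False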
| 665 |
'''simple docstring'''
def _a( UpperCamelCase__ : str, UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple =[[False for _ in range(m + 1 )] for _ in range(n + 1 )]
SCREAMING_SNAKE_CASE__ : List[Any] =True
for i in range(UpperCamelCase__ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
SCREAMING_SNAKE_CASE__ : Optional[int] =True
if a[i].islower():
SCREAMING_SNAKE_CASE__ : List[Any] =True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
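    # Illustrative check (assumed inputs; the function name was anonymized to
    # `_a`): the DP above decides whether the first string can be turned into
    # the second by capitalizing some of its lowercase letters and deleting
    # the remaining lowercase ones.
    print(_a("daBcd", "ABC"))  # True: delete d, capitalize a and c, keep B
    print(_a("dBcd", "ABC"))  # False: no letter can become the leading A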
| 665 | 1 |
'''simple docstring'''
import math
def _a( UpperCamelCase__ : float, UpperCamelCase__ : float ):
'''simple docstring'''
return math.pow(UpperCamelCase__, 2 ) - a
def _a( UpperCamelCase__ : float ):
'''simple docstring'''
return 2 * x
def _a( UpperCamelCase__ : float ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =2.0
while start <= a:
SCREAMING_SNAKE_CASE__ : Optional[Any] =math.pow(UpperCamelCase__, 2 )
return start
def _a( UpperCamelCase__ : float, UpperCamelCase__ : int = 9_9_9_9, UpperCamelCase__ : float = 0.0_0_0_0_0_0_0_0_0_0_0_0_0_1 ):
'''simple docstring'''
if a < 0:
raise ValueError('''math domain error''' )
SCREAMING_SNAKE_CASE__ : Tuple =get_initial_point(UpperCamelCase__ )
for _ in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : List[Any] =value
SCREAMING_SNAKE_CASE__ : Optional[int] =value - fx(UpperCamelCase__, UpperCamelCase__ ) / fx_derivative(UpperCamelCase__ )
if abs(prev_value - value ) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod()
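    # Minimal self-contained sketch of the same Newton iteration (the
    # anonymized definitions above shadow one another, so the update
    # x <- x - (x^2 - a) / (2x) is restated locally for illustration):
    def _newton_sqrt(a: float, max_iter: int = 9_9_9_9, tolerance: float = 1e-14) -> float:
        start = 2.0
        while start <= a:  # mirrors get_initial_point: square until start > a
            start = start * start
        value = start
        for _ in range(max_iter):
            prev_value = value
            value = value - (value * value - a) / (2 * value)
            if abs(prev_value - value) < tolerance:
                break
        return value

    print(_newton_sqrt(16.0))  # -> 4.0 (up to tolerance)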
| 665 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , __lowercase : Optional[Any] , __lowercase : List[str]=13 , __lowercase : int=7 , __lowercase : Tuple=True , __lowercase : Optional[Any]=True , __lowercase : List[Any]=True , __lowercase : str=True , __lowercase : Tuple=99 , __lowercase : Union[str, Any]=64 , __lowercase : Tuple=32 , __lowercase : Optional[Any]=5 , __lowercase : Tuple=4 , __lowercase : Optional[int]=37 , __lowercase : int="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : List[Any]=0.1 , __lowercase : List[Any]=5_12 , __lowercase : int=16 , __lowercase : Optional[int]=2 , __lowercase : Tuple=0.02 , __lowercase : List[str]=3 , __lowercase : List[str]=4 , __lowercase : List[str]=None , ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =parent
SCREAMING_SNAKE_CASE__ : Any =batch_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =seq_length
SCREAMING_SNAKE_CASE__ : Dict =is_training
SCREAMING_SNAKE_CASE__ : List[Any] =use_input_mask
SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_token_type_ids
SCREAMING_SNAKE_CASE__ : List[Any] =use_labels
SCREAMING_SNAKE_CASE__ : int =vocab_size
SCREAMING_SNAKE_CASE__ : str =hidden_size
SCREAMING_SNAKE_CASE__ : Any =embedding_size
SCREAMING_SNAKE_CASE__ : Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE__ : str =num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : str =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Any =max_position_embeddings
SCREAMING_SNAKE_CASE__ : Optional[Any] =type_vocab_size
SCREAMING_SNAKE_CASE__ : Any =type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =initializer_range
SCREAMING_SNAKE_CASE__ : str =num_labels
SCREAMING_SNAKE_CASE__ : List[str] =num_choices
SCREAMING_SNAKE_CASE__ : List[str] =scope
def __magic_name__ ( self : str ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : int =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : int =None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =None
SCREAMING_SNAKE_CASE__ : Optional[Any] =None
SCREAMING_SNAKE_CASE__ : Optional[int] =None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Tuple =ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self : List[str] ) -> Any:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
def __magic_name__ ( self : Any , __lowercase : Tuple , __lowercase : Any , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : Dict , __lowercase : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertModel(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(__lowercase , token_type_ids=__lowercase )
SCREAMING_SNAKE_CASE__ : str =model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__ ( self : Dict , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : Any , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : str =MegatronBertForMaskedLM(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : List[str] , __lowercase : Tuple , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : str , __lowercase : Any , __lowercase : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =MegatronBertForNextSentencePrediction(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : List[str] , __lowercase : Optional[int] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[Any] , __lowercase : str , __lowercase : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForPreTraining(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , next_sentence_label=__lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : Optional[int] , __lowercase : Tuple , __lowercase : Optional[int] , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any , __lowercase : Any , __lowercase : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] =MegatronBertForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : int , __lowercase : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.num_labels
SCREAMING_SNAKE_CASE__ : Dict =MegatronBertForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : int ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Tuple =self.num_labels
SCREAMING_SNAKE_CASE__ : int =MegatronBertForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : Dict , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Any , __lowercase : List[Any] , __lowercase : int , __lowercase : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : int =self.num_choices
SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertForMultipleChoice(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[int] =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Tuple =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[Any] =model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self : str ) -> Any:
SCREAMING_SNAKE_CASE__ : Tuple =self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =config_and_inputs
SCREAMING_SNAKE_CASE__ : Union[str, Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
snake_case_ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = True
# test_resize_embeddings = False
snake_case_ = False
def __magic_name__ ( self : List[Any] , __lowercase : List[str] , __lowercase : Tuple , __lowercase : Tuple=False ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
if return_labels:
if model_class in get_values(__lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] =torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
return inputs_dict
def __magic_name__ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =MegatronBertModelTester(self )
SCREAMING_SNAKE_CASE__ : Tuple =ConfigTester(self , config_class=__lowercase , hidden_size=37 )
def __magic_name__ ( self : str ) -> Dict:
self.config_tester.run_common_tests()
def __magic_name__ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__lowercase )
def __magic_name__ ( self : List[str] ) -> Any:
SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__lowercase )
def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__lowercase )
def __magic_name__ ( self : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__lowercase )
def __magic_name__ ( self : Dict ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__lowercase )
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__lowercase )
def __magic_name__ ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__lowercase )
def _a( UpperCamelCase__ : List[str] ):
'''simple docstring'''
return torch.tensor(
UpperCamelCase__, dtype=torch.long, device=UpperCamelCase__, )
a_ = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
@unittest.skip('''Model is not available.''' )
def __magic_name__ ( self : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Any ='''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
SCREAMING_SNAKE_CASE__ : Optional[int] =os.path.join(os.environ['''MYDIR'''] , __lowercase )
SCREAMING_SNAKE_CASE__ : Any =MegatronBertModel.from_pretrained(__lowercase )
model.to(__lowercase )
model.half()
SCREAMING_SNAKE_CASE__ : Dict =_long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase )[0]
SCREAMING_SNAKE_CASE__ : Dict =torch.Size((1, 9, 10_24) )
self.assertEqual(output.shape , __lowercase )
SCREAMING_SNAKE_CASE__ : str =[-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
SCREAMING_SNAKE_CASE__ : List[Any] =output[0, ii, jj]
SCREAMING_SNAKE_CASE__ : Tuple =expected[3 * ii + jj]
SCREAMING_SNAKE_CASE__ : List[str] ='''ii={} jj={} a={} b={}'''.format(__lowercase , __lowercase , __lowercase , __lowercase )
self.assertTrue(math.isclose(__lowercase , __lowercase , rel_tol=__lowercase , abs_tol=__lowercase ) , msg=__lowercase )
| 665 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : Any ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
SCREAMING_SNAKE_CASE__ : Optional[int] =sd_pipe.to(__lowercase )
sd_pipe.set_progress_bar_config(disable=__lowercase )
sd_pipe.set_scheduler('''sample_euler''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE__ : List[Any] =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =sd_pipe([prompt] , generator=__lowercase , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =output.images
SCREAMING_SNAKE_CASE__ : Optional[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Any =np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =sd_pipe.to(__lowercase )
sd_pipe.set_progress_bar_config(disable=__lowercase )
sd_pipe.set_scheduler('''sample_euler''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] ='''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : int =sd_pipe([prompt] , generator=__lowercase , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
SCREAMING_SNAKE_CASE__ : str =output.images
SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE__ : int =np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def __magic_name__ ( self : str ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Tuple =StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
SCREAMING_SNAKE_CASE__ : Dict =sd_pipe.to(__lowercase )
sd_pipe.set_progress_bar_config(disable=__lowercase )
sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
SCREAMING_SNAKE_CASE__ : Dict ='''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE__ : int =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[str] =sd_pipe(
[prompt] , generator=__lowercase , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=__lowercase , )
SCREAMING_SNAKE_CASE__ : Optional[int] =output.images
SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE__ : str =np.array(
[0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 665 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE__ : Optional[Any] =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
SCREAMING_SNAKE_CASE__ : Dict =os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def __magic_name__ ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Any =F"\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n ".split()
SCREAMING_SNAKE_CASE__ : List[str] =[sys.executable] + distributed_args
execute_subprocess_async(__lowercase , env=os.environ.copy() )
| 665 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
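# Typical entry point for the pipelines re-exported above (a sketch; the call
# downloads the openai/shap-e checkpoint from the hub):
#
#     from diffusers import ShapEPipeline
#     pipe = ShapEPipeline.from_pretrained("openai/shap-e")
#     images = pipe("a shark", guidance_scale=15.0, num_inference_steps=64).images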
| 665 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = ShapEImgaImgPipeline
snake_case_ = ["""image"""]
snake_case_ = ["""image"""]
snake_case_ = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
snake_case_ = False
@property
def __magic_name__ ( self : List[Any] ) -> List[Any]:
return 32
@property
def __magic_name__ ( self : List[str] ) -> Optional[int]:
return 32
@property
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Dict ) -> Union[str, Any]:
return 8
@property
def __magic_name__ ( self : Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
SCREAMING_SNAKE_CASE__ : str =CLIPVisionModel(__lowercase )
return model
@property
def __magic_name__ ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ : int =CLIPImageProcessor(
crop_size=2_24 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , )
return image_processor
@property
def __magic_name__ ( self : List[str] ) -> Dict:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : str ={
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
SCREAMING_SNAKE_CASE__ : str =PriorTransformer(**__lowercase )
return model
@property
def __magic_name__ ( self : Tuple ) -> List[str]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] ={
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE__ : str =ShapERenderer(**__lowercase )
return model
def __magic_name__ ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int =self.dummy_prior
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_encoder
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_processor
SCREAMING_SNAKE_CASE__ : Tuple =self.dummy_renderer
SCREAMING_SNAKE_CASE__ : int =HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=__lowercase , clip_sample=__lowercase , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE__ : Any ={
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __magic_name__ ( self : Any , __lowercase : List[str] , __lowercase : Any=0 ) -> Any:
SCREAMING_SNAKE_CASE__ : int =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase )
if str(__lowercase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ : List[str] =torch.manual_seed(__lowercase )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
SCREAMING_SNAKE_CASE__ : Any ={
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def __magic_name__ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ : int ='''cpu'''
SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : str =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Any =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple =output.images[0]
SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : List[Any] =np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self : List[Any] ) -> List[str]:
        # NOTE: Larger batch sizes cause this test to time out, so only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __magic_name__ ( self : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch_device == '''cpu'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__lowercase , relax_max_difference=__lowercase , )
def __magic_name__ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =1
SCREAMING_SNAKE_CASE__ : List[str] =2
SCREAMING_SNAKE_CASE__ : Dict =self.get_dummy_inputs(__lowercase )
for key in inputs.keys():
if key in self.batch_params:
SCREAMING_SNAKE_CASE__ : Tuple =batch_size * [inputs[key]]
SCREAMING_SNAKE_CASE__ : List[Any] =pipe(**__lowercase , num_images_per_prompt=__lowercase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : Optional[Any] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : List[str] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
SCREAMING_SNAKE_CASE__ : Dict =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
SCREAMING_SNAKE_CASE__ : List[Any] =ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
SCREAMING_SNAKE_CASE__ : Tuple =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple =pipe(
__lowercase , generator=__lowercase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__lowercase , __lowercase )
| 665 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt'}
a_ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
a_ = {
'YituTech/conv-bert-base': 5_1_2,
'YituTech/conv-bert-medium-small': 5_1_2,
'YituTech/conv-bert-small': 5_1_2,
}
a_ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_INIT_CONFIGURATION
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ConvBertTokenizer
def __init__( self : int , __lowercase : int=None , __lowercase : int=None , __lowercase : Dict=True , __lowercase : Optional[int]="[UNK]" , __lowercase : str="[SEP]" , __lowercase : Union[str, Any]="[PAD]" , __lowercase : str="[CLS]" , __lowercase : Dict="[MASK]" , __lowercase : List[str]=True , __lowercase : Dict=None , **__lowercase : Dict , ) -> List[Any]:
super().__init__(
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , tokenize_chinese_chars=__lowercase , strip_accents=__lowercase , **__lowercase , )
SCREAMING_SNAKE_CASE__ : Any =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , __lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , __lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , __lowercase ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE__ : int =getattr(__lowercase , normalizer_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE__ : List[str] =do_lower_case
SCREAMING_SNAKE_CASE__ : Dict =strip_accents
SCREAMING_SNAKE_CASE__ : Any =tokenize_chinese_chars
SCREAMING_SNAKE_CASE__ : List[str] =normalizer_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =do_lower_case
def __magic_name__ ( self : int , __lowercase : Optional[Any] , __lowercase : Optional[int]=None ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __magic_name__ ( self : Union[str, Any] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ : Any =[self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Tuple =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__ ( self : int , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE__ : str =self._tokenizer.model.save(__lowercase , name=__lowercase )
return tuple(__lowercase )
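# Usage sketch (upstream this class is ConvBertTokenizerFast; loading pulls the
# checkpoint from the hub):
#
#     tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#     encoded = tokenizer("convolutions meet attention")
#     # encoded["input_ids"] starts with the [CLS] id and ends with the [SEP] id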
| 665 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """gpt_bigcode"""
snake_case_ = ["""past_key_values"""]
snake_case_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Any , __lowercase : Any=5_02_57 , __lowercase : int=10_24 , __lowercase : List[str]=7_68 , __lowercase : Optional[int]=12 , __lowercase : Dict=12 , __lowercase : List[str]=None , __lowercase : int="gelu_pytorch_tanh" , __lowercase : Union[str, Any]=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Optional[Any]=1e-5 , __lowercase : List[str]=0.02 , __lowercase : Tuple=True , __lowercase : Optional[Any]=True , __lowercase : Union[str, Any]=5_02_56 , __lowercase : List[Any]=5_02_56 , __lowercase : Union[str, Any]=True , __lowercase : List[str]=True , __lowercase : Dict=True , **__lowercase : List[Any] , ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =n_positions
SCREAMING_SNAKE_CASE__ : Dict =n_embd
SCREAMING_SNAKE_CASE__ : Dict =n_layer
SCREAMING_SNAKE_CASE__ : Union[str, Any] =n_head
SCREAMING_SNAKE_CASE__ : List[str] =n_inner
SCREAMING_SNAKE_CASE__ : List[str] =activation_function
SCREAMING_SNAKE_CASE__ : List[Any] =resid_pdrop
SCREAMING_SNAKE_CASE__ : List[Any] =embd_pdrop
SCREAMING_SNAKE_CASE__ : List[str] =attn_pdrop
SCREAMING_SNAKE_CASE__ : Dict =layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : List[str] =initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] =scale_attn_weights
SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_cache
SCREAMING_SNAKE_CASE__ : Dict =attention_softmax_in_fpaa
SCREAMING_SNAKE_CASE__ : int =scale_attention_softmax_in_fpaa
SCREAMING_SNAKE_CASE__ : Dict =multi_query
SCREAMING_SNAKE_CASE__ : Optional[Any] =bos_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] =eos_token_id
super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
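# Minimal illustration of the attribute_map above (upstream this class is
# GPTBigCodeConfig; the values are assumed):
#
#     config = GPTBigCodeConfig(n_embd=2_56, n_layer=4, n_head=8)
#     assert config.hidden_size == 2_56  # resolved through the n_embd alias
#     assert config.num_hidden_layers == 4  # resolved through the n_layer alias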
| 665 | 1 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def __magic_name__ ( *__lowercase : int , **__lowercase : Optional[Any] ) -> Optional[Any]:
pass
@is_pipeline_test
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@require_torch
def __magic_name__ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
SCREAMING_SNAKE_CASE__ : List[str] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] )
        # The scores are so close that floating-point rounding dominates, so the
        # label order is not guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(__lowercase ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
SCREAMING_SNAKE_CASE__ : int =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
] , )
@require_tf
def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] =pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
SCREAMING_SNAKE_CASE__ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Tuple =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
{'''score''': 0.333, '''label''': ANY(__lowercase )},
],
] , )
@slow
@require_torch
def __magic_name__ ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE__ : Any =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : List[str] =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def __magic_name__ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ : str =pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE__ : Optional[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__lowercase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowercase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
| 665 |
'''simple docstring'''
class __SCREAMING_SNAKE_CASE :
    def __init__( self : List[Any] , size : int ) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size
    @staticmethod
    def get_next( index : int ) -> int:
        return index | (index + 1)
    @staticmethod
    def get_prev( index : int ) -> int:
        return (index & (index + 1)) - 1
    def update( self : Dict , index : int , value : int ) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index ) + 1
            if current_left_border == index:
                # The node covers a single element, so it simply stores the value.
                self.tree[index] = value
            else:
                # Recompute the node from the element at ``index`` and the
                # (already consistent) range it covers below it.
                self.tree[index] = max(self.arr[index] , self.query(current_left_border , index ) )
            index = self.get_next(index )
    def query( self : Optional[int] , left : int , right : int ) -> int:
        right -= 1  # because ``right`` is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right )
            if left <= current_left:
                result = max(result , self.tree[right] )
                right = current_left
            else:
                result = max(result , self.arr[right] )
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
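# Illustrative usage of the max-query Fenwick tree above (values are
# arbitrary): ``update`` sets a single position, ``query`` returns the maximum
# over the half-open range [left, right).
if __name__ == "__main__":
    demo = __SCREAMING_SNAKE_CASE(5)
    demo.update(2, 20)
    demo.update(4, 10)
    assert demo.query(0, 5) == 20
    assert demo.query(3, 5) == 10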
| 665 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    model_type = """biogpt"""
    def __init__( self , vocab_size=4_23_84 , hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=40_96 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=10_24 , initializer_range=0.02 , layer_norm_eps=1e-12 , scale_embedding=True , use_cache=True , layerdrop=0.0 , activation_dropout=0.0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
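# Usage sketch (illustrative; this module uses relative imports, so the config
# is normally reached through the transformers package):
#
#     from transformers import BioGptConfig
#
#     config = BioGptConfig(num_hidden_layers=12)
#     assert config.num_hidden_layers == 12 and config.vocab_size == 42384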
| 665 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __SCREAMING_SNAKE_CASE ( DiffusionPipeline ):
    _optional_components = ["""vqvae"""]
    def __init__( self : int , vqvae : AutoencoderKL , unet : UNet2DConditionModel , mel : Mel , scheduler : Union[DDIMScheduler, DDPMScheduler] , ) -> None:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler , mel=mel , vqvae=vqvae )
    def get_default_steps( self : List[str] ) -> int:
        return 50 if isinstance(self.scheduler , DDIMScheduler ) else 10_00
    @torch.no_grad()
    def __call__( self : Dict , batch_size : int = 1 , audio_file : str = None , raw_audio : np.ndarray = None , slice : int = 0 , start_step : int = 0 , steps : int = None , generator : torch.Generator = None , mask_start_secs : float = 0 , mask_end_secs : float = 0 , step_generator : torch.Generator = None , eta : float = 0 , noise : torch.Tensor = None , encoding : torch.Tensor = None , return_dict : bool = True , ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps )
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size ) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=generator , device=self.device , )
        images = noise
        mask = None
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file , raw_audio )
            input_image = self.mel.audio_slice_to_image(slice )
            input_image = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
                (input_image.height, input_image.width) )
            input_image = (input_image / 2_55) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images , 0 ) ).latent_dist.sample(
                    generator=generator )[0]
                input_images = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images , noise , self.scheduler.timesteps[start_step - 1] )
            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second )
            mask_end = int(mask_end_secs * pixels_per_second )
            mask = self.scheduler.add_noise(input_images , noise , torch.tensor(self.scheduler.timesteps[start_step:] ) )
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet , UNet2DConditionModel ):
                model_output = self.unet(images , t , encoding )['''sample''']
            else:
                model_output = self.unet(images , t )['''sample''']
            if isinstance(self.scheduler , DDIMScheduler ):
                images = self.scheduler.step(
                    model_output=model_output , timestep=t , sample=images , eta=eta , generator=step_generator , )['''prev_sample''']
            else:
                images = self.scheduler.step(
                    model_output=model_output , timestep=t , sample=images , generator=step_generator , )['''prev_sample''']
            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images )['''sample''']
        images = (images / 2 + 0.5).clamp(0 , 1 )
        images = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        images = (images * 2_55).round().astype('''uint8''' )
        images = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_ , mode='''RGB''' ).convert('''L''' ) for _ in images) )
        audios = [self.mel.image_to_audio(_ ) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(audios )[:, np.newaxis, :] ) , **ImagePipelineOutput(images ) )
    @torch.no_grad()
    def encode( self : Optional[int] , images : List[Image.Image] , steps : int = 50 ) -> np.ndarray:
        assert isinstance(self.scheduler , DDIMScheduler )
        self.scheduler.set_timesteps(steps )
        sample = np.array(
            [np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
        sample = (sample / 2_55) * 2 - 1
        sample = torch.Tensor(sample ).to(self.device )
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample , t )['''sample''']
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
    @staticmethod
    def slerp( xa : torch.Tensor , xb : torch.Tensor , alpha : float ) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(xa ) , torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
        return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta )
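# Usage sketch (illustrative; this module uses relative imports, so the
# pipeline is normally loaded through diffusers). The checkpoint name is an
# assumption -- any audio-diffusion checkpoint built on this pipeline works:
#
#     import torch
#     from diffusers import DiffusionPipeline
#
#     pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
#     output = pipe(batch_size=1, generator=torch.Generator().manual_seed(42))
#     output.images[0].save("spectrogram.png")   # generated mel spectrogram
#     sample_rate, audio = pipe.mel.get_sample_rate(), output.audios[0]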
| 665 | 1 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key( old_name, num_meta4D_last_stage ):
    '''simple docstring'''
    new_name = old_name
    if "patch_embed" in old_name:
        _, layer, param = old_name.split('''.''' )
if layer == "0":
SCREAMING_SNAKE_CASE__ : Optional[int] =old_name.replace('''0''', '''convolution1''' )
elif layer == "1":
SCREAMING_SNAKE_CASE__ : Dict =old_name.replace('''1''', '''batchnorm_before''' )
elif layer == "3":
SCREAMING_SNAKE_CASE__ : Any =old_name.replace('''3''', '''convolution2''' )
else:
SCREAMING_SNAKE_CASE__ : List[Any] =old_name.replace('''4''', '''batchnorm_after''' )
if "network" in old_name and re.search(R'''\d\.\d''', UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] =R'''\b\d{2}\b'''
if bool(re.search(UpperCamelCase__, UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE__ : List[str] =re.search(R'''\d\.\d\d.''', UpperCamelCase__ ).group()
else:
SCREAMING_SNAKE_CASE__ : Any =re.search(R'''\d\.\d.''', UpperCamelCase__ ).group()
if int(match[0] ) < 6:
SCREAMING_SNAKE_CASE__ : Tuple =old_name.replace(UpperCamelCase__, '''''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =trimmed_name.replace('''network''', match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
SCREAMING_SNAKE_CASE__ : int ='''intermediate_stages.''' + trimmed_name
else:
SCREAMING_SNAKE_CASE__ : Optional[int] =old_name.replace(UpperCamelCase__, '''''' )
if int(match[2] ) < num_meta4D_last_stage:
SCREAMING_SNAKE_CASE__ : Optional[int] =trimmed_name.replace('''network''', '''meta4D_layers.blocks.''' + match[2] )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =str(int(match[2] ) - num_meta4D_last_stage )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =trimmed_name.replace('''network''', '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
SCREAMING_SNAKE_CASE__ : List[Any] =trimmed_name.replace('''norm1''', '''layernorm1''' )
elif "norm2" in old_name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =trimmed_name.replace('''norm2''', '''layernorm2''' )
elif "fc1" in old_name:
SCREAMING_SNAKE_CASE__ : Optional[int] =trimmed_name.replace('''fc1''', '''linear_in''' )
elif "fc2" in old_name:
SCREAMING_SNAKE_CASE__ : List[Any] =trimmed_name.replace('''fc2''', '''linear_out''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(R'''.\d.''', UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Any =old_name.replace('''network''', '''intermediate_stages''' )
if "fc" in new_name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =new_name.replace('''fc''', '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
SCREAMING_SNAKE_CASE__ : str =new_name.replace('''norm1''', '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
SCREAMING_SNAKE_CASE__ : Optional[int] =new_name.replace('''norm2''', '''batchnorm_after''' )
if "proj" in new_name:
SCREAMING_SNAKE_CASE__ : List[Any] =new_name.replace('''proj''', '''projection''' )
if "dist_head" in new_name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =new_name.replace('''dist_head''', '''distillation_classifier''' )
elif "head" in new_name:
SCREAMING_SNAKE_CASE__ : List[Any] =new_name.replace('''head''', '''classifier''' )
elif "patch_embed" in new_name:
SCREAMING_SNAKE_CASE__ : Any ='''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
SCREAMING_SNAKE_CASE__ : List[Any] =new_name.replace('''norm''', '''layernorm''' )
SCREAMING_SNAKE_CASE__ : List[str] ='''efficientformer.''' + new_name
else:
SCREAMING_SNAKE_CASE__ : Any ='''efficientformer.encoder.''' + new_name
return new_name
def convert_torch_checkpoint( checkpoint, num_meta4D_last_stage ):
    '''simple docstring'''
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key )
        checkpoint[rename_key(key, num_meta4D_last_stage )] = val
    return checkpoint
def prepare_img( ):
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url, stream=True ).raw )
    return image
def convert_efficientformer_checkpoint( checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool ):
    '''simple docstring'''
    orig_state_dict = torch.load(checkpoint_path, map_location='''cpu''' )['''model''']
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file )
    model = EfficientFormerForImageClassificationWithTeacher(config )
    model_name = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage )
    model.load_state_dict(new_state_dict )
model.eval()
    pillow_resamplings = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 2_5_6
    crop_size = 2_2_4
    processor = EfficientFormerImageProcessor(
        size={'''shortest_edge''': image_size}, crop_size={'''height''': crop_size, '''width''': crop_size}, resample=pillow_resamplings['''bicubic'''], )
    pixel_values = processor(images=image, return_tensors='''pt''' ).pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings['''bicubic'''] ),
            CenterCrop(crop_size ),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    assert torch.allclose(original_pixel_values, pixel_values )
    outputs = model(pixel_values )
    logits = outputs.logits
    expected_shape = (1, 1_0_0_0)
if "l1" in model_name:
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.Tensor(
[-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9, -0.5_1_2_4, 0.4_1_8_3, -0.6_7_9_3, -1.3_7_7_7, -0.0_8_9_3, -0.7_3_5_8, -2.4_3_2_8] )
assert torch.allclose(logits[0, :1_0], UpperCamelCase__, atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.Tensor(
[-1.3_1_5_0, -1.5_4_5_6, -1.2_5_5_6, -0.8_4_9_6, -0.7_1_2_7, -0.7_8_9_7, -0.9_7_2_8, -0.3_0_5_2, 0.3_7_5_1, -0.3_1_2_7] )
assert torch.allclose(logits[0, :1_0], UpperCamelCase__, atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
SCREAMING_SNAKE_CASE__ : int =torch.Tensor(
[-1.0_2_8_3, -1.4_1_3_1, -0.5_6_4_4, -1.3_1_1_5, -0.5_7_8_5, -1.2_0_4_9, -0.7_5_2_8, 0.1_9_9_2, -0.3_8_2_2, -0.0_8_7_8] )
assert logits.shape == expected_shape
else:
raise ValueError(
f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7" )
    # Save Checkpoints
    Path(pytorch_dump_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_path )
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}" )
    processor.save_pretrained(pytorch_dump_path )
    print(f"Processor successfully saved at {pytorch_dump_path}" )
    if push_to_hub:
        print('''Pushing model to the hub...''' )
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message='''Add model''', use_temp_dir=True, )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message='''Add image processor''', use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
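# Example invocation (paths are placeholders and the script filename is an
# assumption -- use whatever name this file carries in your checkout):
#
#   python convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py \
#       --pytorch_model_path efficientformer_l1_300d.pth \
#       --config_file efficientformer_l1_config.json \
#       --pytorch_dump_path ./efficientformer-l1 \
#       --no-push_to_hub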
| 665 |
'''simple docstring'''
from math import isqrt
def calculate_prime_numbers( max_number: int ):
    '''simple docstring'''
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution( max_number: int = 1_0**8 ):
    '''simple docstring'''
    prime_numbers = calculate_prime_numbers(max_number // 2 )
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
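# Quick sanity check with a small bound (illustrative, not from the original
# solution): below 30 there are ten numbers with exactly two prime factors
# counted with multiplicity -- 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26.
assert solution(30) == 10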
if __name__ == "__main__":
print(F'''{solution() = }''')
| 665 | 1 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric("mean_iou")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
_CITATION = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union( pred_label , label , num_labels , ignore_index: bool , label_map: Optional[Dict[int, int]] = None , reduce_labels: bool = False , ):
    '''simple docstring'''
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    mask = np.not_equal(label , ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union( results , gt_seg_maps , num_labels , ignore_index , label_map: Optional[Dict[int, int]] = None , reduce_labels: bool = False , ):
    '''simple docstring'''
    total_area_intersect = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_union = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_label = np.zeros((num_labels,) , dtype=np.float64 )
    for result, gt_seg_map in zip(results , gt_seg_maps ):
        area_intersect , area_union , area_pred_label , area_label = intersect_and_union(
            result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou( results , gt_seg_maps , num_labels , ignore_index , nan_to_num: Optional[int] = None , label_map: Optional[Dict[int, int]] = None , reduce_labels: bool = False , ):
    '''simple docstring'''
    total_area_intersect , total_area_union , total_area_pred_label , total_area_label = total_intersect_and_union(
        results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics['''mean_iou'''] = np.nanmean(iou )
    metrics['''mean_accuracy'''] = np.nanmean(acc )
    metrics['''overall_accuracy'''] = all_acc
    metrics['''per_category_iou'''] = iou
    metrics['''per_category_accuracy'''] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
    def _info( self : Dict ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
} ) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
    def _compute( self , predictions , references , num_labels: int , ignore_index: bool , nan_to_num: Optional[int] = None , label_map: Optional[Dict[int, int]] = None , reduce_labels: bool = False , ):
        iou_result = mean_iou(
            results=predictions , gt_seg_maps=references , num_labels=num_labels , ignore_index=ignore_index , nan_to_num=nan_to_num , label_map=label_map , reduce_labels=reduce_labels , )
        return iou_result
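# A short, self-contained check of the ``mean_iou`` function above; the inputs
# are a trivial prediction/reference pair chosen for illustration.
if __name__ == "__main__":
    pred = np.array([[0, 1], [1, 1]])
    ref = np.array([[0, 1], [0, 1]])
    result = mean_iou([pred], [ref], num_labels=2, ignore_index=255)
    # Class 0 overlaps in 1 of 2 positions, class 1 in 2 of 3 -> IoUs 0.5 and 2/3.
    print(result['''per_category_iou'''], result['''mean_iou'''])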
| 665 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self : int ) -> Any:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB )
        mask_token = AddedToken('''<mask>''' , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'''mask_token''': mask_token} )
        tokenizer.add_tokens(['''<ctc_blank>'''] )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self : Dict , tokenizer ) -> Optional[int]:
        input_text = '''this is a test'''
        output_text = '''this is a test'''
        return input_text, output_text
    def get_clean_sequence( self : List[Any] , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ) -> Any:
        input_text , output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
    def test_convert_token_and_id( self : Dict ) -> str:
        token = '''<pad>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self : Tuple ) -> List[str]:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<s>''' )
        self.assertEqual(vocab_keys[1] , '''<pad>''' )
        self.assertEqual(vocab_keys[-4] , '''œ''' )
        self.assertEqual(vocab_keys[-2] , '''<mask>''' )
        self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
        self.assertEqual(len(vocab_keys ) , 81 )
    def test_vocab_size( self : Dict ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
    def test_add_tokens_tokenizer( self : Optional[Any] ) -> str:
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer )
                self.assertNotEqual(vocab_size , 0 )
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
                added_toks = tokenizer.add_tokens(new_toks )
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer )
                self.assertNotEqual(vocab_size_2 , 0 )
                self.assertEqual(vocab_size , vocab_size_2 )
                self.assertEqual(added_toks , len(new_toks ) )
                self.assertEqual(all_size_2 , all_size + len(new_toks ) )
                tokens = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=False )
                self.assertGreaterEqual(len(tokens ) , 4 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                new_toks_2 = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2 )
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer )
                self.assertNotEqual(vocab_size_3 , 0 )
                self.assertEqual(vocab_size , vocab_size_3 )
                self.assertEqual(added_toks_2 , len(new_toks_2 ) )
                self.assertEqual(all_size_3 , all_size_2 + len(new_toks_2 ) )
                tokens = tokenizer.encode(
                    '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=False )
                self.assertGreaterEqual(len(tokens ) , 6 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] , tokens[1] )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokens[-4] )
                self.assertEqual(tokens[0] , tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
    def test_pickle_subword_regularization_tokenizer( self : Optional[Any] ) -> Any:
        pass
    def test_subword_regularization_tokenizer( self : List[str] ) -> List[Any]:
        pass
    def test_full_tokenizer( self : Dict ) -> List[Any]:
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize('''This is a test''' )
        # fmt: off
        self.assertListEqual(tokens , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
        ids = tokenizer.convert_tokens_to_ids(tokens )
# fmt: off
        self.assertListEqual(ids , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
    def test_tokenizer_integration( self : List[str] ) -> List[str]:
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
        expected_encoding = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=sequences , )
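# Usage sketch of the tokenizer class exercised above (assumes network access
# to the "microsoft/speecht5_asr" checkpoint used by the slow integration test).
if __name__ == "__main__":
    tokenizer = SpeechT5Tokenizer.from_pretrained('''microsoft/speecht5_asr''' )
    ids = tokenizer('''this is a test''' ).input_ids
    print(ids)
    print(tokenizer.decode(ids ))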
| 665 | 1 |