| column | dtype | values |
| --- | --- | --- |
| code | string | length 86 to 54.5k |
| code_codestyle | int64 | 0 to 371 |
| style_context | string | length 87 to 49.2k |
| style_context_codestyle | int64 | 0 to 349 |
| label | int64 | 0 to 1 |
import torch
from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    """Few-shot named entity recognition model built on a BERT encoder."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        """Encodes tokenized inputs and returns the last hidden state."""
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        """Sums token embeddings along the token axis."""
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q, S, T=1):
        """Temperature-scaled softmax over cosine similarities."""
        return self.softmax(T * self.cos(q, S))

    def forward(self, W_query, W_supports):
        """Scores start/end token positions of the query against each support set."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        # Encode both the queries and the (concatenated) support examples.
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            # Support embeddings at the entity start/end marker positions.
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
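# A minimal usage sketch (an assumption for illustration, not a documented API:
# the real project pairs this model with its own tokenizer utilities, which
# build the `W_query`/`W_supports` dicts, including the "sizes" entry and the
# start/end entity-marker token ids consumed by `forward` above):
#
#   model = FSNERModel()
#   p_starts, p_ends = model(W_query, W_supports)
#   # p_starts[i, j]: score that support token j marks an entity start for query i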
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"MBartForCausalLM",
"MBartForConditionalGeneration",
"MBartForQuestionAnswering",
"MBartForSequenceClassification",
"MBartModel",
"MBartPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"TFMBartForConditionalGeneration",
"TFMBartModel",
"TFMBartPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"FlaxMBartForConditionalGeneration",
"FlaxMBartForQuestionAnswering",
"FlaxMBartForSequenceClassification",
"FlaxMBartModel",
"FlaxMBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
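# What the `_LazyModule` indirection above buys (a sketch; the names below are
# real exports declared in `_import_structure`): `import transformers` stays
# cheap, and heavy submodules are only materialized on first attribute access:
#
#   from transformers import MBartConfig                    # config module only
#   from transformers import MBartForConditionalGeneration  # torch code loads here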
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
SCREAMING_SNAKE_CASE__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
# -------------------------- default selection --------------------------
# rotors --------------------------
SCREAMING_SNAKE_CASE__ = """EGZWVONAHDCLFQMSIPJBYUKXTR"""
SCREAMING_SNAKE_CASE__ = """FOBHMDKEXQNRAULPGSJVTYICZW"""
SCREAMING_SNAKE_CASE__ = """ZJXESIUQLHAVRMDOYGTNFWPBKC"""
# reflector --------------------------
reflector = {
    "A": "N",
    "N": "A",
    "B": "O",
    "O": "B",
    "C": "P",
    "P": "C",
    "D": "Q",
    "Q": "D",
    "E": "R",
    "R": "E",
    "F": "S",
    "S": "F",
    "G": "T",
    "T": "G",
    "H": "U",
    "U": "H",
    "I": "V",
    "V": "I",
    "J": "W",
    "W": "J",
    "K": "X",
    "X": "K",
    "L": "Y",
    "Y": "L",
    "M": "Z",
    "Z": "M",
}
# -------------------------- extra rotors --------------------------
SCREAMING_SNAKE_CASE__ = """RMDJXFUWGISLHVTCQNKYPBEZOA"""
SCREAMING_SNAKE_CASE__ = """SGLCPQWZHKXAREONTFBVIYJUDM"""
SCREAMING_SNAKE_CASE__ = """HVSICLTYKQUBXDWAJZOMFGPREN"""
SCREAMING_SNAKE_CASE__ = """RZWQHFMVDBKICJLNTUXAGYPSOE"""
SCREAMING_SNAKE_CASE__ = """LFKIJODBEGAMQPXVUHYSTCZRWN"""
SCREAMING_SNAKE_CASE__ = """KOAEGVDHXPQZMLFTYWJNBRCIUS"""
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Checks whether the rotor positions, rotor selection and plugboard are usable."""
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    """Creates the symmetric letter-swap dictionary for the plugboard."""
    # Tests the input string: it must be of type string and of even length
    # (so pairs can be made).
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Create the dictionary: each pair maps in both directions.
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
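# A quick, verifiable example of the plugboard mapping built above: pairs are
# symmetric, so applying the dict twice returns the original letter.
assert _plugboard("AB") == {"A": "B", "B": "A"}
assert _plugboard("") == {}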
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encrypts (or, by symmetry, decrypts) a message with the machine above."""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(rotor_position, rotor_selection, plugb.upper())

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0
        # else:
        #     pass
        #     An error could also be raised here:
        #     raise ValueError('Invalid symbol(' + repr(symbol) + ')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = """This is my Python script that emulates the Enigma machine from WWII."""
SCREAMING_SNAKE_CASE__ = (1, 1, 1)
SCREAMING_SNAKE_CASE__ = """pictures"""
SCREAMING_SNAKE_CASE__ = (rotora, rotora, rotora)
SCREAMING_SNAKE_CASE__ = enigma(message, rotor_pos, rotor_sel, pb)
print("""Encrypted message:""", en)
print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging

logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Rel position bias params of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
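# Example invocation of this conversion script (paths are placeholders):
#
#   python <this_script>.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path ./umt5_converted \
#       --scalable_attention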
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = "\\n\n"

_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.

For more information, see https://huggingface.co/docs/transformers/perplexity
"""

_KWARGS_DESCRIPTION = """
Args:
    model_id (str): model used for calculating Perplexity
        NOTE: Perplexity can only be calculated for causal language models.
        This includes models such as gpt2, causal variations of bert,
        causal versions of t5, and more (the full list can be found
        in the AutoModelForCausalLM documentation here:
        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )

    input_texts (list of str): input text, each separate text snippet
        is one list entry.
    batch_size (int): the batch size to run texts through the model. Defaults to 16.
    add_start_token (bool): whether to add the start token to the texts,
        so the perplexity can include the probability of the first word. Defaults to True.
    device (str): device to run on, defaults to 'cuda' when available
Returns:
    perplexity: dictionary containing the perplexity scores for the texts
        in the input list, as well as the mean perplexity. If one of the input texts is
        longer than the max input length of the model, then it is truncated to the
        max length for the perplexity computation.
Examples:
    Example 1:
        >>> perplexity = datasets.load_metric("perplexity")
        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              add_start_token=False,
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results["mean_perplexity"], 2))
        78.22
        >>> print(round(results["perplexities"][0], 2))
        11.11

    Example 2:
        >>> perplexity = datasets.load_metric("perplexity")
        >>> input_texts = datasets.load_dataset("wikitext",
        ...                                     "wikitext-2-raw-v1",
        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS
        [...]
        >>> input_texts = [s for s in input_texts if s!='']
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results["mean_perplexity"], 2))
        60.35
        >>> print(round(results["perplexities"][0], 2))
        81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Counts set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Counts set bits by checking the parity of each right-shifted value."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
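# Worked example (easy to verify by hand): 25 is 0b11001, which has three set
# bits. Kernighan's update clears the lowest set bit on each pass,
# 0b11001 -> 0b11000 -> 0b10000 -> 0b00000, so the loop body runs three times.
assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
assert get_set_bits_count_using_modulo_operator(25) == 3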
def benchmark() -> None:
    """Benchmarks the two set-bit counters on a few sample values."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")

        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")

        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    """Configuration class for the Swin Transformer V2 model."""

    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
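# A small sanity check of the derived channel dimension (names from this file):
#
#   config = Swinv2Config(embed_dim=96, depths=[2, 2, 6, 2])
#   assert config.hidden_size == 96 * 2 ** 3  # 768 after the last stage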
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    """Reads a dataset from a Spark DataFrame via the `Spark` dataset builder."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
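# A minimal usage sketch (hedged: assumes an active `SparkSession` named
# `spark`; `SparkDatasetReader` is the class defined above):
#
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = SparkDatasetReader(df, streaming=False).read()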
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    """Wraps a Chinese-CLIP image processor and a BERT tokenizer into one processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
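# A minimal usage sketch (the checkpoint name is illustrative):
#
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   batch = processor(text=["一张猫的照片"], images=image, return_tensors="pt")
#   # -> input_ids / attention_mask from the tokenizer, plus pixel_values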
def solution(n: int = 100) -> int:
    """Project Euler #6 in closed form: square of the sum minus sum of squares."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
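# Quick hand check for n = 10: the sum 1..10 is 55 and 55**2 == 3025, the
# squares sum to 385, and 3025 - 385 == 2640.
assert solution(10) == 2640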
if __name__ == "__main__":
print(F'''{solution() = }''')
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
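# A minimal usage sketch (hedged: the exact call convention comes from the
# surrounding agents/tools framework, not from this file alone):
#
#   translator = TranslationTool()
#   translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English")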
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A pure-Python vector with basic linear-algebra operations."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector( dimension ):
    """simple docstring"""
    assert isinstance(dimension , int )
    return Vector([0] * dimension )
def unit_basis_vector( dimension , pos ):
    """simple docstring"""
    assert isinstance(dimension , int ) and (isinstance(pos , int ))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans )
def axpy( scalar , x , y ):
    """simple docstring"""
    assert (
        isinstance(x , Vector )
        and isinstance(y , Vector )
        and (isinstance(scalar , (int, float) ))
    )
    return x * scalar + y
def random_vector( n , a , b ):
    """simple docstring"""
    random.seed(None )
    ans = [random.randint(a , b ) for _ in range(n )]
    return Vector(ans )
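# A quick illustration of the helpers above (values chosen arbitrarily):
#     v = unit_basis_vector(3 , 0 )      # Vector (1,0,0)
#     w = random_vector(3 , 0 , 9 )      # three random integer components
#     print(axpy(2 , w , v ) )           # computes 2*w + v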
class Matrix:
    def __init__( self , matrix , w , h ) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h
def __str__( self ) -> str:
__magic_name__ : Dict = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
    def __add__( self , other ) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] + other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception("""matrices must have the same dimension!""" )
    def __sub__( self , other ) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] - other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self , lowerCAmelCase__ ) -> Matrix:
...
@overload
def __mul__( self , lowerCAmelCase__ ) -> Vector:
...
    def __mul__( self , other ) -> Vector | Matrix:
        if isinstance(other , Vector ):  # matrix-vector
            if len(other ) == self.__width:
                ans = zero_vector(self.__height )
                for i in range(self.__height ):
                    prods = [
                        self.__matrix[i][j] * other.component(j )
                        for j in range(self.__width )
                    ]
                    ans.change_component(i , sum(prods ) )
                return ans
            else:
                raise Exception(
                    """vector must have the same size as the """
                    """number of columns of the matrix!""" )
        elif isinstance(other , (int, float) ):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width )]
                for i in range(self.__height )
            ]
            return Matrix(matrix , self.__width , self.__height )
        return None
    def height( self ) -> int:
        return self.__height
    def width( self ) -> int:
        return self.__width
    def component( self , x , y ) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("""component: indices out of bounds""" )
    def change_component( self , x , y , value ) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("""change_component: indices out of bounds""" )
    def minor( self , x , y ) -> float:
        if self.__height != self.__width:
            raise Exception("""Matrix is not square""" )
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor ) ):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor , self.__width - 1 , self.__height - 1 ).determinant()
    def cofactor( self , x , y ) -> float:
        if self.__height != self.__width:
            raise Exception("""Matrix is not square""" )
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x , y )
        else:
            raise Exception("""Indices out of bounds""" )
    def determinant( self ) -> float:
        if self.__height != self.__width:
            raise Exception("""Matrix is not square""" )
        if self.__height < 1:
            raise Exception("""Matrix has no element""" )
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0 , y ) for y in range(self.__width )
            ]
            return sum(cofactor_prods )
def square_zero_matrix( n ):
    """simple docstring"""
    matrix: list[list[float]] = [[0] * n for _ in range(n )]
    return Matrix(matrix , n , n )
def random_matrix( width , height , a , b ):
    """simple docstring"""
    random.seed(None )
    matrix: list[list[float]] = [
        [random.randint(a , b ) for _ in range(width )] for _ in range(height )
    ]
    return Matrix(matrix , width , height )
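# Minimal sanity check for the classes above; det([[1,2],[3,4]]) = 1*4 - 2*3 = -2:
#     m = Matrix([[1 , 2] , [3 , 4]] , 2 , 2 )
#     assert m.determinant() == -2
#     assert (m * Vector([1 , 1] )).component(0 ) == 3   # first row sum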
| 342 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
    }
}
class _A ( PreTrainedTokenizer ):
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                '''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
                '''See https://pypi.org/project/jieba/ for installation.''')
        self.jieba = jieba
        self.translator = str.maketrans(''' \n''' , '''\u2582\u2583''')
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.sp_model)
    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text( self , inputs ):
        '''simple docstring'''
        if self.remove_space:
            outputs = ''' '''.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
        if not self.keep_accents:
            outputs = unicodedata.normalize('''NFKD''' , outputs )
            outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text ):
        '''simple docstring'''
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , '''''' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def _decode( self , *args , **kwargs ):
        '''simple docstring'''
        text = super()._decode(*args , **kwargs )
        text = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
        return text
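# Illustrative round trip with the tokenizer above (requires the `sentencepiece`
# and `jieba` packages; the checkpoint comes from the pretrained map at the top):
#     tokenizer = _A.from_pretrained('''TsinghuaAI/CPM-Generate''' )
#     ids = tokenizer.encode('''你好''' )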
| 131 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ['''image_embeds''', '''negative_image_embeds''', '''image''']
    batch_params = [
        '''image_embeds''',
        '''negative_image_embeds''',
        '''image''',
    ]
    required_optional_params = [
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        '''simple docstring'''
        return 32
    @property
    def time_input_dim( self ):
        '''simple docstring'''
        return 32
    @property
    def block_out_channels_a( self ):
        '''simple docstring'''
        return self.time_input_dim
    @property
    def time_embed_dim( self ):
        '''simple docstring'''
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ):
        '''simple docstring'''
        return 100
    @property
    def dummy_unet( self ):
        '''simple docstring'''
        torch.manual_seed(0)
        model_kwargs = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        model = UNet2DConditionModel(**model_kwargs )
        return model
    @property
    def dummy_movq_kwargs( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq( self ):
        '''simple docstring'''
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        '''simple docstring'''
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            '''num_train_timesteps''': 1_000,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.0_00_85,
            '''beta_end''': 0.0_12,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed )).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 )).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed )).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image )).convert('''RGB''' ).resize((256, 256) )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs
    def test_kandinsky_img2img( self ):
        '''simple docstring'''
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63])
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _B ( unittest.TestCase ):
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img( self ):
        '''simple docstring'''
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        prompt = '''A red cartoon frog, 4k'''
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
        output = pipeline(
            image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
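# Note on the two-stage pattern exercised by the test above: the prior pipeline
# maps the text prompt to CLIP image embeddings, and the img2img decoder turns
# those embeddings plus the init image into pixels; strength=0.2 keeps the
# result close to the original cat image.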
| 131 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 324 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
    },
    'tokenizer_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/rembert': 2_56,
}
SPIECE_UNDERLINE = '▁'
class __lowerCAmelCase ( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
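# Illustrative usage of the fast tokenizer above (checkpoint from the pretrained
# map; requires the `tokenizers` backend):
#     tok = __lowerCAmelCase.from_pretrained('google/rembert')
#     tok.save_vocabulary('/tmp/rembert')   # copies sentencepiece.model alongside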
| 324 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
    '''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_bert_fast'''] = ['''BertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bert'''] = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_bert'''] = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_bert_tf'''] = ['''TFBertTokenizer''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_bert'''] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
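# With the `_LazyModule` registration above, `transformers.models.bert` resolves
# names on first attribute access instead of importing every backend eagerly;
# e.g. the TensorFlow classes are only imported when `TFBertModel` is touched.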
| 36 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class UpperCAmelCase_ ( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ['pixel_values']
    def __init__( self , do_resize = True , size = None , resample = PIL.Image.BICUBIC , do_center_crop = True , crop_size = None , rescale_factor = 1 / 255 , do_rescale = True , do_normalize = True , image_mean = None , image_std = None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , resample = PIL.Image.BICUBIC , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
        return resize(
            image , size=(size["height"], size["width"]) , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
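# Illustrative usage sketch for the image processor above (a dummy 300x300 RGB
# image; the output shape follows the 256-resize / 224-center-crop defaults):
#     proc = UpperCAmelCase_()
#     out = proc.preprocess(PIL.Image.new("RGB" , (300, 300) ) , return_tensors="np" )
#     assert out["pixel_values"].shape == (1, 3, 224, 224)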
| 36 | 1 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 319 |
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
'''simple docstring'''
    model_name_or_path: Optional[str] = field(
        default=None , metadata={
            """help""": (
                """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
            )
        } , )
    model_type: Optional[str] = field(
        default=None , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(MODEL_TYPES )} , )
    config_overrides: Optional[str] = field(
        default=None , metadata={
            """help""": (
                """Override some existing default config settings when a model is trained from scratch. Example: """
                """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
            )
        } , )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
    model_revision: str = field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    use_auth_token: bool = field(
        default=False , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )
    def __post_init__( self ):
        '''simple docstring'''
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                '''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' )
@dataclass
class DataTrainingArguments:
'''simple docstring'''
    dataset_name: Optional[str] = field(
        default=None , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
    train_file: Optional[str] = field(default=None , metadata={"""help""": """The input training data file (a text file)."""} )
    validation_file: Optional[str] = field(
        default=None , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
    train_ref_file: Optional[str] = field(
        default=None , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , )
    validation_ref_file: Optional[str] = field(
        default=None , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    validation_split_percentage: Optional[int] = field(
        default=5 , metadata={
            """help""": """The percentage of the train set used as validation set in case there's no validation split"""
        } , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated. Default to the max input length of the model."""
            )
        } , )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
    mlm_probability: float = field(
        default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
    pad_to_max_length: bool = field(
        default=False , metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        } , )
    def __post_init__( self ):
        '''simple docstring'''
        if self.train_file is not None:
            extension = self.train_file.split('''.''' )[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split('''.''' )[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset , ref_file ):
    with open(ref_file , '''r''' , encoding='''utf-8''' ) as f:
        refs = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
    assert len(dataset ) == len(refs )
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict['''chinese_ref'''] = refs
    return Dataset.from_dict(dataset_dict )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
        if "validation" not in datasets.keys():
            datasets['''validation'''] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
            datasets['''train'''] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files['''train'''] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['''validation'''] = data_args.validation_file
        extension = data_args.train_file.split('''.''' )[-1]
        if extension == "txt":
            extension = '''text'''
        datasets = load_dataset(extension , data_files=data_files )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        '''cache_dir''': model_args.cache_dir,
        '''revision''': model_args.model_revision,
        '''use_auth_token''': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('''You are instantiating a new config instance from scratch.''' )
    if model_args.config_overrides is not None:
        logger.info(F"""Overriding config: {model_args.config_overrides}""" )
        config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
    tokenizer_kwargs = {
        '''cache_dir''': model_args.cache_dir,
        '''use_fast''': model_args.use_fast_tokenizer,
        '''revision''': model_args.model_revision,
        '''use_auth_token''': True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **tokenizer_kwargs )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **tokenizer_kwargs )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' )
    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('''Training new model from scratch''' )
        model = AutoModelForMaskedLM.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets['''train'''].column_names
    else:
        column_names = datasets['''validation'''].column_names
    text_column_name = '''text''' if '''text''' in column_names else column_names[0]
    padding = '''max_length''' if data_args.pad_to_max_length else False
    def tokenize_function(examples ):
        # Remove empty lines
        examples['''text'''] = [line for line in examples['''text'''] if len(line ) > 0 and not line.isspace()]
        return tokenizer(examples['''text'''] , padding=padding , truncation=True , max_length=data_args.max_seq_length )
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets['''train'''] = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file )
    if data_args.validation_ref_file is not None:
        tokenized_datasets['''validation'''] = add_chinese_references(
            tokenized_datasets['''validation'''] , data_args.validation_ref_file )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        output_train_file = os.path.join(training_args.output_dir , '''train_results.txt''' )
        if trainer.is_world_process_zero():
            with open(output_train_file , '''w''' ) as writer:
                logger.info('''***** Train results *****''' )
                for key, value in sorted(train_result.metrics.items() ):
                    logger.info(F""" {key} = {value}""" )
                    writer.write(F"""{key} = {value}\n""" )
        # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
        trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['''eval_loss'''] )
        results['''perplexity'''] = perplexity
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in sorted(results.items() ):
                    logger.info(F""" {key} = {value}""" )
                    writer.write(F"""{key} = {value}\n""" )
return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
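# Example invocation (flags map to the dataclass fields defined above; file
# paths and the model name are placeholders):
#     python run_mlm_wwm.py \
#         --model_name_or_path bert-base-chinese \
#         --train_file train.txt --train_ref_file train_refs.json \
#         --do_train --do_eval --output_dir ./mlm-wwm-out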
| 319 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bloom_fast'] = ['BloomTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bloom'] = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 189 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class a ( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , query_images=None , padding="max_length" , return_tensors="np" , **kwargs ):
        '''simple docstring'''
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none." )
        if text is not None:
            if isinstance(text , str ) or (isinstance(text , list ) and not isinstance(text[0] , list )):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs )]
            elif isinstance(text , list ) and isinstance(text[0] , list ):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t ))
                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs )
                    encodings.append(encoding )
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            else:
                raise ValueError("Target return tensor type could not be returned" )
            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def post_process( self , *args , **kwargs ):
        '''simple docstring'''
        return self.image_processor.post_process(*args , **kwargs )

    def post_process_object_detection( self , *args , **kwargs ):
        '''simple docstring'''
        return self.image_processor.post_process_object_detection(*args , **kwargs )

    def post_process_image_guided_detection( self , *args , **kwargs ):
        '''simple docstring'''
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs )

    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self ):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
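# --- Illustrative usage (commented out; assumes the public
# "google/owlvit-base-patch32" checkpoint, Pillow, and network access) ---------
# from PIL import Image
# import requests
# from transformers import OwlViTProcessor
#
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# url = "http://images.cocodataset.org/val2017/000000039769.jpg"
# image = Image.open(requests.get(url, stream=True).raw)
# inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                    images=image, return_tensors="pt")
# # -> BatchEncoding with input_ids, attention_mask and pixel_values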
| 189 | 1 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> Optional[int]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self ) -> Tuple:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin , unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self ) -> int:
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self )
@slow
    def test_model_from_pretrained(self ) -> List[str]:
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self ) -> Dict:
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=True )
        input_ids = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.int32 )
        output = model(input_ids )[0]
        expected_shape = [1, 1_1, 5_0_2_6_5]
        self.assertEqual(list(output.shape ) , expected_shape )
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
    def test_inference_no_head(self ) -> Tuple:
        model = FlaxRobertaPreLayerNormModel.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=True )
        input_ids = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.int32 )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
| 5 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO) , "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
@cached_property
    def resolver( self : str ) -> List[Any]:
        '''simple docstring'''
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )
@slow
    def test_resolver( self : Optional[int] ) -> Tuple:
'''simple docstring'''
self.resolver.convert_models(['''heb-eng'''] )
@slow
    def test_model_card( self : Dict ) -> Optional[Any]:
'''simple docstring'''
        content, mmeta = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=True )
assert mmeta["long_pair"] == "heb-eng"
| 146 | 0 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass( KwargsHandler ):
    a : int = 0
    b : bool = False
    c : float = 3.0
class KwargsHandlerTester( unittest.TestCase ):
    def test_kwargs_handler( self ) -> Dict:
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"""a""": 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {"""a""": 2, """b""": True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"""a""": 2, """c""": 2.25} )
@require_cuda
    def test_grad_scaler_kwargs( self ) -> Dict:
# If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="""fp16""" , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
        self.assertEqual(scaler._enabled , True )
@require_multi_gpu
    def test_ddp_kwargs( self ) -> Any:
        cmd = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd , env=os.environ.copy() )
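# --- Illustrative sketch (not part of the tests above): what `to_kwargs` does
# under the hood. A KwargsHandler compares each dataclass field against its
# declared default and returns only the overridden ones. `tiny_to_kwargs` is a
# hypothetical stand-alone equivalent, not part of accelerate.
from dataclasses import fields


def tiny_to_kwargs(handler) -> dict:
    cls = type(handler)
    default = cls()  # instance carrying the declared defaults
    return {
        f.name: getattr(handler, f.name)
        for f in fields(cls)
        if getattr(handler, f.name) != getattr(default, f.name)
    }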
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ''''''
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
        raise ValueError(error_msg)
| 362 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase_ = {
'''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    lowerCamelCase_['modeling_graphormer'] = [
'''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GraphormerForGraphClassification''',
'''GraphormerModel''',
'''GraphormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], lowerCamelCase_, module_spec=__spec__)
| 34 | 0 |
'''simple docstring'''
def aliquot_sum( input_num: int ) -> int:
    if not isinstance(input_num , int ):
        raise ValueError('''Input must be an integer''' )
    if input_num <= 0:
        raise ValueError('''Input must be positive''' )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
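def aliquot_sum_fast(input_num: int) -> int:
    # Illustrative alternative (not in the original file): enumerate divisor
    # pairs up to sqrt(n), so the scan is O(sqrt(n)) instead of O(n) while
    # returning the same proper-divisor sum as aliquot_sum above.
    if not isinstance(input_num, int):
        raise ValueError('''Input must be an integer''')
    if input_num <= 0:
        raise ValueError('''Input must be positive''')
    total = 0
    divisor = 1
    while divisor * divisor <= input_num:
        if input_num % divisor == 0:
            paired = input_num // divisor
            total += divisor
            if paired not in (divisor, input_num):
                total += paired
        divisor += 1
    # for input_num == 1 the loop counted 1 once, but the proper-divisor sum
    # of 1 is 0, so subtract it back out in that single edge case
    return total - 1 if input_num == 1 else total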
if __name__ == "__main__":
import doctest
doctest.testmod()
| 67 |
"""simple docstring"""
def print_pascal_triangle(num_rows):
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows):
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle, current_row_idx):
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(triangle, current_row, current_row_idx, current_col_idx, ):
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows):
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark():
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = F"{func.__name__}({value})"
        timing = timeit(F"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(F"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
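def pascal_row_via_binomials(row_idx: int) -> list:
    # Illustrative alternative (not in the original file): row `row_idx` of
    # Pascal's triangle is exactly the binomial coefficients C(row_idx, k),
    # which math.comb yields directly without building the earlier rows.
    from math import comb

    return [comb(row_idx, k) for k in range(row_idx + 1)]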
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 292 | 0 |
"""simple docstring"""
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING = None


class JaxFormatter( TensorFormatter[Mapping, "jax.Array", Mapping] ):
    """simple docstring"""

    def __init__( self , features=None , device=None , **jnp_array_kwargs ):
        """simple docstring"""
        super().__init__(features=features )
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device , Device ):
            raise ValueError(
                F"""Expected {device} to be a `str` not {type(device )}, as `jaxlib.xla_extension.Device` """
                'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
                'the device with `str()` to get its string identifier that will be internally mapped '
                'to the actual `jaxlib.xla_extension.Device`.' )
        self.device = device if isinstance(device , str ) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            logger.warning(
                F"""Device with string identifier {self.device} not listed among the available """
                F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
                F"""device: {str(jax.devices()[0] )}.""" )
            self.device = str(jax.devices()[0] )
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str():
        """simple docstring"""
        import jax

        return {str(device ): device for device in jax.devices()}

    def _consolidate( self , column ):
        """simple docstring"""
        import jax
        import jax.numpy as jnp

        if isinstance(column , list ) and column:
            if all(
                isinstance(x , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(column , axis=0 )
        return column

    def _tensorize( self , value ):
        """simple docstring"""
        import jax
        import jax.numpy as jnp

        if isinstance(value , (str, bytes, type(None )) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {'''dtype''': jnp.int64}
            else:
                default_dtype = {'''dtype''': jnp.int32}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {'''dtype''': jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value , **{**default_dtype, **self.jnp_array_kwargs} )

    def _recursive_tensorize( self , data_struct ):
        """simple docstring"""
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct , torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(data_struct , '__array__' ) and not isinstance(data_struct , jax.Array ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self._recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self._recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )

    def recursive_tensorize( self , data_struct ):
        """simple docstring"""
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )

    def format_row( self , pa_table ):
        """simple docstring"""
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )

    def format_column( self , pa_table ):
        """simple docstring"""
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column

    def format_batch( self , pa_table ):
        """simple docstring"""
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
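# --- Illustrative usage (commented out; assumes the `datasets` library, where
# this formatter is what backs `Dataset.with_format("jax")`) -------------------
# from datasets import Dataset
# ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
# ds[0]["x"]  # -> a jax.Array placed on the formatter's default device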
| 354 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    SCREAMING_SNAKE_CASE__["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    SCREAMING_SNAKE_CASE__["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], SCREAMING_SNAKE_CASE__, module_spec=__spec__)
| 149 | 0 |
"""simple docstring"""
from __future__ import annotations
class IIRFilter :
    """simple docstring"""

    def __init__( self , order ):
        '''simple docstring'''
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients( self , a_coeffs , b_coeffs ):
        '''simple docstring'''
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            error_msg = (
                F"""Expected a_coeffs to have {self.order + 1} elements """
                F"""for {self.order}-order filter, got {len(a_coeffs )}"""
            )
            raise ValueError(error_msg )
        if len(b_coeffs ) != self.order + 1:
            error_msg = (
                F"""Expected b_coeffs to have {self.order + 1} elements """
                F"""for {self.order}-order filter, got {len(b_coeffs )}"""
            )
            raise ValueError(error_msg )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process( self , sample ):
        '''simple docstring'''
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
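# --- Illustrative usage (not in the original file): a first-order recursive
# smoother y[n] = 0.5 * x[n] + 0.5 * y[n-1], i.e. a = [1.0, -0.5] and
# b = [0.5, 0.0]. The coefficients are a hand-picked toy example, not the
# output of any filter-design procedure.
def _demo_iir() -> None:
    filt = IIRFilter(1)
    filt.set_coefficients([1.0, -0.5], [0.5, 0.0])
    samples = [1.0, 1.0, 1.0, 1.0]  # step input
    # prints [0.5, 0.75, 0.875, 0.9375] -- the output converges toward 1.0
    print([round(filt.process(s), 4) for s in samples])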
| 179 |
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
a_ = logging.get_logger(__name__)
class Conversation :
"""simple docstring"""
    def __init__( self , text = None , conversation_id = None , past_user_inputs=None , generated_responses=None ):
        '''simple docstring'''
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid : uuid.UUID = conversation_id
        self.past_user_inputs : List[str] = past_user_inputs
        self.generated_responses : List[str] = generated_responses
        self.new_user_input : Optional[str] = text
    def __eq__( self , other ):
        '''simple docstring'''
        if not isinstance(other , Conversation ):
            return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
    def add_user_input( self , text , overwrite = False ):
        '''simple docstring'''
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
                    F"""with: \"{text}\".""" )
                self.new_user_input = text
            else:
                logger.warning(
                    F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
                    F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
        else:
            self.new_user_input = text
    def mark_processed( self ):
        '''simple docstring'''
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input )
        self.new_user_input = None
    def append_response( self , response ):
        '''simple docstring'''
        self.generated_responses.append(response )
    def iter_texts( self ):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
    def __repr__( self ):
        '''simple docstring'''
        output = F"""Conversation id: {self.uuid} \n"""
        for is_user, text in self.iter_texts():
            name = '''user''' if is_user else '''bot'''
            output += F"""{name} >> {text} \n"""
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS , R"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class ConversationalPipeline( Pipeline ):
"""simple docstring"""
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters( self , min_length_for_response=None , minimum_tokens=None , clean_up_tokenization_spaces=None , **generate_kwargs ):
        '''simple docstring'''
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params['''min_length_for_response'''] = min_length_for_response
        if minimum_tokens is not None:
            forward_params['''minimum_tokens'''] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params['''max_length'''] = generate_kwargs['''max_length''']
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params['''clean_up_tokenization_spaces'''] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs )
        return preprocess_params, forward_params, postprocess_params
    def __call__( self , conversations , num_workers=0 , **kwargs ):
        '''simple docstring'''
        outputs = super().__call__(conversations , num_workers=num_workers , **kwargs )
        if isinstance(outputs , list ) and len(outputs ) == 1:
            return outputs[0]
        return outputs
    def preprocess( self , conversation , min_length_for_response=32 ):
        '''simple docstring'''
        if not isinstance(conversation , Conversation ):
            raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' )
        if conversation.new_user_input is None:
            raise ValueError(
                F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
                '''Add user inputs with the conversation\'s `add_user_input` method''' )
        if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation )
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward( self , model_inputs , minimum_tokens=10 , **generate_kwargs ):
        '''simple docstring'''
        max_length = generate_kwargs.get('''max_length''' , self.model.config.max_length )
        n = model_inputs['''input_ids'''].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(F"""Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
            trim = max_length - minimum_tokens
            model_inputs['''input_ids'''] = model_inputs['''input_ids'''][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs['''attention_mask'''] = model_inputs['''attention_mask'''][:, -trim:]
        conversation = model_inputs.pop('''conversation''' )
        generate_kwargs['''max_length'''] = max_length
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess( self , model_outputs , clean_up_tokenization_spaces=True ):
        '''simple docstring'''
        output_ids = model_outputs['''output_ids''']
        answer = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
        conversation = model_outputs['''conversation''']
        conversation.mark_processed()
        conversation.append_response(answer )
        return conversation
    def _legacy_parse_and_tokenize( self , conversation ):
        '''simple docstring'''
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) )
        if len(input_ids ) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
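# --- Illustrative usage (commented out; assumes a conversational checkpoint
# such as the public "microsoft/DialoGPT-medium") ------------------------------
# from transformers import pipeline
# chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
# conversation = Conversation("What's the best way to learn Python?")
# conversation = chatbot(conversation)
# print(conversation.generated_responses[-1])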
| 179 | 1 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort( a , start , end ):
    '''simple docstring'''
    count = 0
    if start < end:
        pivot = randint(start , end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p , count = _in_place_partition(a , start , end )
        count += _in_place_quick_sort(a , start , p - 1 )
        count += _in_place_quick_sort(a , p + 1 , end )
    return count
def _in_place_partition( a , start , end ):
    '''simple docstring'''
    count = 0
    pivot = randint(start , end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start , end ):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
| 353 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class MgpstrConfig( PretrainedConfig ):
    model_type = "mgp-str"

    def __init__( self , image_size=[32, 128] , patch_size=4 , num_channels=3 , max_token_length=27 , num_character_labels=38 , num_bpe_labels=50_257 , num_wordpiece_labels=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , mlp_ratio=4.0 , qkv_bias=True , distilled=False , layer_norm_eps=1e-5 , drop_rate=0.0 , attn_drop_rate=0.0 , drop_path_rate=0.0 , output_a3_attentions=False , initializer_range=0.0_2 , **kwargs , ) -> Union[str, Any]:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
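# --- Illustrative usage (commented out; the defaults mirror the
# "alibaba-damo/mgp-str-base" architecture referenced in the archive map) ------
# config = MgpstrConfig()
# (config.max_token_length, config.hidden_size)  # -> (27, 768)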
| 92 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor( BaseImageProcessor ):
    model_input_names = ["pixel_values"]

    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , do_convert_rgb = True , **kwargs , ) -> Optional[int]:
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ) -> Union[str, Any]:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        output_size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def center_crop( self , image , size , data_format = None , **kwargs , ) -> Dict:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )

    def rescale( self , image , scale , data_format = None , **kwargs , ) -> str:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> Dict:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , do_convert_rgb = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> Any:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name="size" , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
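# --- Illustrative usage (commented out; assumes Pillow, a local image file,
# and the public "openai/clip-vit-base-patch32" checkpoint) --------------------
# from PIL import Image
# processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
# batch = processor(images=Image.open("cat.png"), return_tensors="pt")
# batch["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224])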
| 158 |
"""simple docstring"""
def snake_to_camel_case( input_str: str , use_pascal: bool = False ) -> str:
    if not isinstance(input_str , str ):
        msg = F'Expected string as input, found {type(input_str )}'
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = F'Expected boolean as use_pascal parameter, found {type(use_pascal )}'
        raise ValueError(msg )
    words = input_str.split("""_""" )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = """""" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
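# --- Illustrative calls (not in the original file) ----------------------------
# snake_to_camel_case("some_random_string")                   -> "someRandomString"
# snake_to_camel_case("some_random_string", use_pascal=True)  -> "SomeRandomString"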
if __name__ == "__main__":
from doctest import testmod
testmod()
| 291 | 0 |
"""simple docstring"""
from math import pi
def arc_length( angle , radius ):
    '''simple docstring'''
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
| 367 |
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    '''simple docstring'''
    config = BertConfig.from_json_file(bert_config_file )
    print(f"Building PyTorch model from configuration: {config}" )
    model = BertForPreTraining(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
A : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
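# --- Illustrative invocation (paths and the script filename are placeholders) --
# python convert_bert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./bert_model.ckpt \
#     --bert_config_file ./bert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin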
| 259 | 0 |
import math
class Graph :
    '''simple docstring'''

    def __init__( self , n=0 ):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # dp[i][j] stores minimum distance from i to j
        for i in range(0 , n ):
            self.dp[i][i] = 0  # distance of a node to itself is 0

    def add_edge( self , u , v , w ):
        self.dp[u][v] = w

    def floyd_warshall( self ):
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )

    def show_min( self , u , v ):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
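    # Illustrative checks (not in the original file): with the edges above the
    # shortest paths are 1 -> 3 -> 4 (5 + 6) and 0 -> 2 -> 3 (9 + 7).
    print(graph.show_min(1, 4))  # expected 11
    print(graph.show_min(0, 3))  # expected 16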
| 15 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read( bpayload: bytes , sampling_rate: int ) -> np.array:
    ar = F'''{sampling_rate}'''
    ac = '1'
    format_for_conversion = 'f32le'
    ffmpeg_command = [
        'ffmpeg',
        '-i',
        'pipe:0',
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    try:
        with subprocess.Popen(ffmpeg_command , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes , np.float32 )
    if audio.shape[0] == 0:
        raise ValueError('Malformed soundfile' )
    return audio
def ffmpeg_microphone( sampling_rate: int , chunk_length_s: float , format_for_conversion: str = "f32le" , ) -> Dict:
    ar = F'''{sampling_rate}'''
    ac = '1'
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
    system = platform.system()
    if system == "Linux":
        format_ = 'alsa'
        input_ = 'default'
    elif system == "Darwin":
        format_ = 'avfoundation'
        input_ = ':0'
    elif system == "Windows":
        format_ = 'dshow'
        input_ = 'default'
    ffmpeg_command = [
        'ffmpeg',
        '-f',
        format_,
        '-i',
        input_,
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-fflags',
        'nobuffer',
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command , chunk_len )
    for item in iterator:
        yield item
def ffmpeg_microphone_live( sampling_rate: int , chunk_length_s: float , stream_chunk_s = None , stride_length_s = None , format_for_conversion: str = "f32le" , ) -> str:
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate , chunk_s , format_for_conversion=format_for_conversion )
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    if isinstance(stride_length_s , (int, float) ):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s )
    for item in chunk_bytes_iter(microphone , chunk_len , stride=(stride_left, stride_right) , stream=True ):
        # Put everything back in numpy scale
        item['raw'] = np.frombuffer(item['raw'] , dtype=dtype )
        item['stride'] = (
            item['stride'][0] // size_of_sample,
            item['stride'][1] // size_of_sample,
        )
        item['sampling_rate'] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter( iterator , chunk_len: int , stride , stream: bool = False ) -> Optional[int]:
    acc = b''
    stride_left , stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc ) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc ) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'raw': acc[:chunk_len], 'stride': stride}
                if stream:
                    item['partial'] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc ) > stride_left:
        item = {'raw': acc, 'stride': (_stride_left, 0)}
        if stream:
            item['partial'] = False
        yield item
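# --- Illustrative usage of ffmpeg_microphone_live above (commented out; it
# requires a working ffmpeg install and a microphone; the numeric values are
# arbitrary example choices) ----------------------------------------------------
# for item in ffmpeg_microphone_live(sampling_rate=16000, chunk_length_s=5.0, stream_chunk_s=1.0):
#     audio = item["raw"]           # np.float32 samples for "f32le"
#     left, right = item["stride"]  # stride expressed in samples
#     if not item["partial"]:
#         break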
def _ffmpeg_stream( ffmpeg_command , buflen: int ) -> str:
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command , stdout=subprocess.PIPE , bufsize=bufsize ) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen )
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
| 212 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCAmelCase = {
'configuration_mobilenet_v2': [
'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileNetV2Config',
'MobileNetV2OnnxConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    __UpperCAmelCase['feature_extraction_mobilenet_v2'] = ['MobileNetV2FeatureExtractor']
    __UpperCAmelCase['image_processing_mobilenet_v2'] = ['MobileNetV2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    __UpperCAmelCase['modeling_mobilenet_v2'] = [
'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileNetV2ForImageClassification',
'MobileNetV2ForSemanticSegmentation',
'MobileNetV2Model',
'MobileNetV2PreTrainedModel',
'load_tf_weights_in_mobilenet_v2',
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], __UpperCAmelCase, module_spec=__spec__)
| 145 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    __UpperCAmelCase['modeling_roc_bert'] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], __UpperCAmelCase, module_spec=__spec__)
| 145 | 1 |
DIGITS_SQUARED = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def next_number( number: int ) -> int:
    '''simple docstring'''
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
lowercase__ : list[bool | None] = [None] * 1_0_0_0_0_0_0_0
lowercase__ : Union[str, Any] = True
lowercase__ : List[str] = False
def A_ ( snake_case : int ) -> bool:
'''simple docstring'''
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
__UpperCamelCase = chain(next_number(snake_case ) )
__UpperCamelCase = number_chain
while number < 10000000:
__UpperCamelCase = number_chain
number *= 10
return number_chain
def A_ ( snake_case : int = 10000000 ) -> int:
'''simple docstring'''
for i in range(1 , snake_case ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }")
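A quick by-hand check of the two endpoints, independent of the functions above: starting from 85, a single step of summing squared digits lands on 89 (8**2 + 5**2 == 89), while 44 -> 32 -> 13 -> 10 -> 1 ends at 1.

n = 85
steps = [n]
while n not in (1, 89):
    n = sum(int(d) ** 2 for d in str(n))
    steps.append(n)
print(steps, "->", n)  # [85] -> 89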
| 328 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , )-> Dict:
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = num_labels
__UpperCamelCase = num_choices
__UpperCamelCase = scope
def A__ ( self )-> List[str]:
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self )-> str:
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Any:
'''simple docstring'''
__UpperCamelCase = DistilBertModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = DistilBertForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Tuple:
'''simple docstring'''
__UpperCamelCase = DistilBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = DistilBertForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> str:
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = DistilBertForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> str:
'''simple docstring'''
__UpperCamelCase = self.num_choices
__UpperCamelCase = DistilBertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) = config_and_inputs
__UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
_snake_case = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
_snake_case = (
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case = True
_snake_case = True
_snake_case = True
_snake_case = True
def A__ ( self )-> Dict:
'''simple docstring'''
__UpperCamelCase = DistilBertModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , dim=37 )
def A__ ( self )-> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> int:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Any:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Tuple:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
@slow
def A__ ( self )-> List[str]:
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = DistilBertModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@slow
@require_torch_gpu
def A__ ( self )-> List[str]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__UpperCamelCase = True
__UpperCamelCase = model_class(config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = torch.jit.trace(
SCREAMING_SNAKE_CASE_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , '''traced_model.pt''' ) )
__UpperCamelCase = torch.jit.load(os.path.join(SCREAMING_SNAKE_CASE_ , '''traced_model.pt''' ) , map_location=SCREAMING_SNAKE_CASE_ )
loaded(inputs_dict['''input_ids'''].to(SCREAMING_SNAKE_CASE_ ) , inputs_dict['''attention_mask'''].to(SCREAMING_SNAKE_CASE_ ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self )-> Tuple:
'''simple docstring'''
__UpperCamelCase = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
__UpperCamelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
__UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0]
__UpperCamelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = torch.tensor(
[[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
| 328 | 1 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        """Minimal stand-in so the names below still resolve when vision deps are missing."""

        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    """https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
__magic_name__ :int = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = pipeline(
'document-question-answering' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ :Dict = INVOICE_URL
lowerCAmelCase__ :Optional[Any] = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '' ) ) )
lowerCAmelCase__ :int = 'What is the placebo?'
lowerCAmelCase__ :int = [
{
'image': load_image(__UpperCAmelCase ),
'question': question,
},
{
'image': image,
'question': question,
},
{
'image': image,
'question': question,
'word_boxes': word_boxes,
},
]
return dqa_pipeline, examples
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = dqa_pipeline(__UpperCAmelCase , top_k=2 )
self.assertEqual(
__UpperCAmelCase , [
[
{'score': ANY(__UpperCAmelCase ), 'answer': ANY(__UpperCAmelCase ), 'start': ANY(__UpperCAmelCase ), 'end': ANY(__UpperCAmelCase )},
{'score': ANY(__UpperCAmelCase ), 'answer': ANY(__UpperCAmelCase ), 'start': ANY(__UpperCAmelCase ), 'end': ANY(__UpperCAmelCase )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' )
lowerCAmelCase__ :Tuple = INVOICE_URL
lowerCAmelCase__ :Union[str, Any] = 'How many cats are there?'
lowerCAmelCase__ :Union[str, Any] = [
{'score': 0.00_01, 'answer': 'oy 2312/2019', 'start': 3_8, 'end': 3_9},
{'score': 0.00_01, 'answer': 'oy 2312/2019 DUE', 'start': 3_8, 'end': 4_0},
]
lowerCAmelCase__ :Optional[int] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , __UpperCAmelCase )
lowerCAmelCase__ :List[str] = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , __UpperCAmelCase )
        # This image contains no text for the OCR step to detect, so LayoutLMv2 should
        # return an empty (no-answer) result.
lowerCAmelCase__ :Optional[int] = './tests/fixtures/tests_samples/COCO/000000039769.png'
lowerCAmelCase__ :Union[str, Any] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(__UpperCAmelCase , [] )
        # We can optionally pass the words and bounding boxes directly
lowerCAmelCase__ :Tuple = './tests/fixtures/tests_samples/COCO/000000039769.png'
lowerCAmelCase__ :Optional[Any] = []
lowerCAmelCase__ :Union[str, Any] = []
lowerCAmelCase__ :Optional[int] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , words=__UpperCAmelCase , boxes=__UpperCAmelCase , top_k=2 )
self.assertEqual(__UpperCAmelCase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , )
lowerCAmelCase__ :str = INVOICE_URL
lowerCAmelCase__ :Optional[int] = 'What is the invoice number?'
lowerCAmelCase__ :Tuple = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'score': 0.99_44, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.00_09, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
lowerCAmelCase__ :Any = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'score': 0.99_44, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.00_09, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
lowerCAmelCase__ :Optional[int] = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'score': 0.99_44, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.00_09, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=5_0 , )
lowerCAmelCase__ :Any = INVOICE_URL
lowerCAmelCase__ :Optional[int] = 'What is the invoice number?'
lowerCAmelCase__ :Optional[Any] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'score': 0.99_74, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.99_48, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
lowerCAmelCase__ :Tuple = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'score': 0.99_74, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.99_48, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
lowerCAmelCase__ :List[Any] = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'score': 0.99_74, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.99_48, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=__UpperCAmelCase , revision='3dc6de3' , )
lowerCAmelCase__ :Optional[int] = INVOICE_URL
lowerCAmelCase__ :List[Any] = 'What is the invoice number?'
lowerCAmelCase__ :List[Any] = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
lowerCAmelCase__ :Optional[int] = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
lowerCAmelCase__ :Optional[int] = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
]
]
* 2 , )
lowerCAmelCase__ :List[Any] = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '' ) ) )
# This model should also work if `image` is set to None
lowerCAmelCase__ :Optional[Any] = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'score': 0.42_51, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.08_19, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=__UpperCAmelCase , revision='3dc6de3' , max_seq_len=5_0 , )
lowerCAmelCase__ :Dict = INVOICE_URL
lowerCAmelCase__ :str = 'What is the invoice number?'
lowerCAmelCase__ :Dict = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'score': 0.99_99, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.99_98, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
lowerCAmelCase__ :Union[str, Any] = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'score': 0.99_99, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.99_98, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
]
]
* 2 , )
lowerCAmelCase__ :Dict = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '' ) ) )
# This model should also work if `image` is set to None
lowerCAmelCase__ :Tuple = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'score': 0.99_99, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.99_98, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
@slow
@require_torch
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = pipeline(
'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , )
lowerCAmelCase__ :int = INVOICE_URL
lowerCAmelCase__ :List[Any] = 'What is the invoice number?'
lowerCAmelCase__ :str = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , [{'answer': 'us-001'}] )
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
def snake_case ( self ):
'''simple docstring'''
pass
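For reference, the simplest standalone use of the pipeline exercised above looks like this. A sketch only: it assumes Pillow and pytesseract are installed, reuses the INVOICE_URL constant defined earlier in this file, and pins the same model revision the tests pin.

from transformers import pipeline

dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa", revision="3dc6de3")
print(dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1))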
| 254 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A = logging.get_logger(__name__)
__A = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _lowerCAmelCase ( a , a ):
"""simple docstring"""
__magic_name__ :int = """swin"""
__magic_name__ :Tuple = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :int = version.parse("""1.11""" )
@property
def snake_case ( self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def snake_case ( self ):
'''simple docstring'''
return 1E-4
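A small usage sketch (assuming this file ships as transformers' Swin configuration, so `SwinConfig` is importable): note that the hidden size is derived from `embed_dim` and the number of stages rather than passed in.

from transformers import SwinConfig

config = SwinConfig()  # defaults: embed_dim=96, depths=[2, 2, 6, 2]
print(config.hidden_size)  # 96 * 2 ** 3 == 768
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']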
| 254 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    # Each rank contributes a distinct slice: rank 0 -> [1., 2.], rank 1 -> [3., 4.], ...
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # Give the main process one extra element so the other processes actually need padding.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
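Illustrative only, not part of the test: with two processes, create_tensor yields [1., 2.] on rank 0 and [3., 4.] on rank 1, which is why test_gather expects [1., 2., 3., 4.]. Faking the state (the FakeState class below is a hypothetical stand-in for accelerate's PartialState) shows a single rank's slice without launching anything:

import torch

class FakeState:
    num_processes = 2
    process_index = 1
    device = torch.device("cpu")

tensor = (torch.arange(FakeState.num_processes) + 1.0 + (FakeState.num_processes * FakeState.process_index)).to(FakeState.device)
print(tensor)  # tensor([3., 4.])  -- rank 1's contribution to the gather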
| 99 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 111 | 0 |
def catalan_number(number):
    """Returns the ``number``-th element of 1, 1, 2, 5, 14, 42, ... (the Catalan numbers).

    >>> catalan_number(5)
    14
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1

    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1

    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
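A quick worked check of the recurrence used above, C(n) = C(n-1) * (4n - 2) // (n + 1) with C(1) = 1:

c = 1
values = [c]
for i in range(1, 6):
    c = c * (4 * i - 2) // (i + 1)
    values.append(c)
print(values)  # [1, 1, 2, 5, 14, 42] -- the first six Catalan numbers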
| 121 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 6_5_0, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
},
] )
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Union[str, Any] ):
if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True,
            )
assert hasattr(self , "env" )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : str=1 ):
# creates estimator
return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=SCREAMING_SNAKE_CASE , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : str ):
TrainingJobAnalytics(SCREAMING_SNAKE_CASE ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
def snake_case ( self : str ):
# create estimator
lowercase__ : Optional[int] = self.create_estimator()
# run training
estimator.fit()
# result dataframe
lowercase__ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowercase__ : str = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
lowercase__ : Tuple = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowercase__ : Any = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , SCREAMING_SNAKE_CASE )
| 121 | 1 |
"""simple docstring"""
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = 'naver-clova-ix/donut-base'


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
| 243 |
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers, mirroring fairseq's Dictionary."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file of '<symbol> <count>' lines."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary, accumulating its count."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file (path or file object)."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
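# Example: a fairseq "dict.txt" consists of plain "<token> <count>" lines, e.g.
#     hello 42
#     world 7
# so Dictionary.load() on such a file yields
#     indices == {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}
# because the four special symbols are registered first in __init__.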
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: {'le@@': 5, 'er': 7} => {'le': 5, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
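# Example invocation (the script file name and paths below are illustrative):
#   python convert_biogpt_checkpoint.py \
#       --biogpt_checkpoint_path /path/to/fairseq_checkpoint_dir \
#       --pytorch_dump_folder_path /path/to/output_dir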
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
UpperCamelCase_ = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 243 | 1 |
def compute_ap(l):  # noqa: E741
    """Prints the articulation points of the undirected graph given as an adjacency list."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
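# For this adjacency list the articulation points are 2, 3 and 5, printed one per line:
# removing 2 separates {0, 1} from the rest, removing 3 isolates 4, and removing 5
# disconnects the cycle {6, 7, 8}.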
| 146 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=3_8_4,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=1_2_8,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=2_0,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=3_0,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=4_2, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
        'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
    )

logger.info('Training/evaluation parameters %s', args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True

engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    """Runs one TensorRT inference step; returns the host output buffers and the elapsed time."""
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets['validation'].column_names

question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some questions have excess whitespace on the left, which is not useful; strip it.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
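# Example: with max_seq_length=384 and doc_stride=128, a long context is split into
# overlapping 384-token windows, and overflow_to_sample_mapping records which original
# example each window came from so the predictions can be regrouped afterwards.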
eval_examples = raw_datasets['validation']

# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage='eval'):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
    references = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
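# For reference (illustrative values, not from the original script): the squad
# and squad_v2 metrics loaded below consume exactly the structures built above,
# e.g.
#     predictions = [{"id": "q0", "prediction_text": "Denver Broncos"}]
#     references = [{"id": "q0", "answers": {"text": ["Denver Broncos"], "answer_start": [177]}}]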
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
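    # e.g. a hypothetical binding of shape (batch, seq) = (64, 384) holding
    # float32 values gives trt.volume((64, 384)) * 4 = 98304 bytes; the shape
    # here is an assumption for illustration, the real one comes from the engine.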
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate host (page-locked) and device buffers for the two outputs: start logits (binding 3) and end logits (binding 4).
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(F' Num examples = {len(eval_dataset)}')
logger.info(F' Batch size = {args.per_device_eval_batch_size}')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0))
logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F'Evaluation metrics: {eval_metric}')
| 146 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowercase__ : Optional[Any] = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=18 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=None , ):
'''simple docstring'''
__A : str = size if size is not None else {'height': 20, 'width': 20}
__A : Union[str, Any] = parent
__A : Tuple = batch_size
__A : Optional[Any] = num_channels
__A : Optional[Any] = image_size
__A : Any = min_resolution
__A : Dict = max_resolution
__A : Optional[int] = size
__A : Any = do_normalize
__A : Tuple = do_convert_rgb
__A : Dict = [512, 1024, 2048, 4096]
__A : List[str] = patch_size if patch_size is not None else {'height': 16, 'width': 16}
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
__A : str = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase).raw).convert('RGB')
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (a__ , unittest.TestCase ):
lowerCAmelCase = PixaStructImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = PixaStructImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_UpperCAmelCase , 'do_normalize'))
self.assertTrue(hasattr(_UpperCAmelCase , 'do_convert_rgb'))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.image_processor_tester.prepare_dummy_image()
__A : Any = self.image_processing_class(**self.image_processor_dict)
__A : List[Any] = 2048
__A : str = image_processor(_UpperCAmelCase , return_tensors='pt' , max_patches=_UpperCAmelCase)
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606) , atol=1e-3 , rtol=1e-3))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image)
# Test not batched input
__A : Dict = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
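        # With the tester defaults above (16x16 patches, 3 channels) this is
        # 16 * 16 * 3 + 2 = 770: the extra 2 columns hold the row and column
        # index that Pix2Struct prepends to every flattened patch.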
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__A : Tuple = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCAmelCase).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__A : Optional[Any] = image_processor(
_UpperCAmelCase , return_tensors='pt' , max_patches=_UpperCAmelCase).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image)
# Test not batched input
__A : Optional[int] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
__A : List[str] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_UpperCAmelCase):
__A : str = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCAmelCase).flattened_patches
__A : str = 'Hello'
__A : Tuple = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__A : Any = image_processor(
_UpperCAmelCase , return_tensors='pt' , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray)
__A : Dict = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__A : Optional[int] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCAmelCase).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__A : List[str] = image_processor(
_UpperCAmelCase , return_tensors='pt' , max_patches=_UpperCAmelCase).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor)
# Test not batched input
__A : List[str] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__A : Optional[int] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCAmelCase).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__A : Any = image_processor(
_UpperCAmelCase , return_tensors='pt' , max_patches=_UpperCAmelCase).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (a__ , unittest.TestCase ):
lowerCAmelCase = PixaStructImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = PixaStructImageProcessingTester(self , num_channels=4)
__A : int = 3
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_UpperCAmelCase , 'do_normalize'))
self.assertTrue(hasattr(_UpperCAmelCase , 'do_convert_rgb'))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image)
# Test not batched input
__A : Optional[int] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__A : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_UpperCAmelCase).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__A : Any = image_processor(
_UpperCAmelCase , return_tensors='pt' , max_patches=_UpperCAmelCase).flattened_patches
self.assertEqual(
            encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 190 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = tempfile.mkdtemp()
__A : Optional[int] = BlipImageProcessor()
__A : List[str] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')
__A : Any = BlipProcessor(_UpperCAmelCase , _UpperCAmelCase)
processor.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase).tokenizer
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase).image_processor
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
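        # np.moveaxis(x, 0, -1) turns the channels-first (3, 30, 400) arrays
        # created above into the (30, 400, 3) layout that PIL expects.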
return image_inputs
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__A : int = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
__A : List[Any] = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0)
__A : Tuple = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_UpperCAmelCase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , _UpperCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = self.get_image_processor()
__A : str = self.get_tokenizer()
__A : int = BlipProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : List[str] = self.prepare_image_inputs()
__A : str = image_processor(_UpperCAmelCase , return_tensors='np')
__A : str = processor(images=_UpperCAmelCase , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.get_image_processor()
__A : Tuple = self.get_tokenizer()
__A : Union[str, Any] = BlipProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : str = 'lower newer'
__A : Dict = processor(text=_UpperCAmelCase)
__A : Tuple = tokenizer(_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : Optional[Any] = BlipProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : Dict = 'lower newer'
__A : int = self.prepare_image_inputs()
__A : List[Any] = processor(text=_UpperCAmelCase , images=_UpperCAmelCase)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase):
processor()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : Union[str, Any] = BlipProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A : List[str] = processor.batch_decode(_UpperCAmelCase)
__A : List[str] = tokenizer.batch_decode(_UpperCAmelCase)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.get_image_processor()
__A : Union[str, Any] = self.get_tokenizer()
__A : Optional[Any] = BlipProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : str = 'lower newer'
__A : str = self.prepare_image_inputs()
__A : Optional[int] = processor(text=_UpperCAmelCase , images=_UpperCAmelCase)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
| 190 | 1 |
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Tuple , lowercase_ : Optional[int] , lowercase_ : Any=13 , lowercase_ : Optional[int]=7 , lowercase_ : List[Any]=True , lowercase_ : List[str]=True , lowercase_ : int=True , lowercase_ : List[Any]=True , lowercase_ : Any=99 , lowercase_ : List[str]=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : Tuple=4 , lowercase_ : int=37 , lowercase_ : Any="gelu" , lowercase_ : int=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : Tuple=512 , lowercase_ : Optional[Any]=16 , lowercase_ : Optional[int]=2 , lowercase_ : Any=0.02 , lowercase_ : str=False , lowercase_ : List[Any]=True , lowercase_ : Dict="None" , lowercase_ : Tuple=3 , lowercase_ : Any=4 , lowercase_ : Tuple=None , ):
lowercase_ : int = parent
lowercase_ : List[Any] = batch_size
lowercase_ : Optional[Any] = seq_length
lowercase_ : List[str] = is_training
lowercase_ : List[Any] = use_input_mask
lowercase_ : Optional[int] = use_token_type_ids
lowercase_ : Union[str, Any] = use_labels
lowercase_ : Dict = vocab_size
lowercase_ : List[str] = hidden_size
lowercase_ : Optional[int] = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Optional[Any] = intermediate_size
lowercase_ : Optional[int] = hidden_act
lowercase_ : Any = hidden_dropout_prob
lowercase_ : Any = attention_probs_dropout_prob
lowercase_ : Any = max_position_embeddings
lowercase_ : Dict = type_vocab_size
lowercase_ : List[str] = type_sequence_label_size
lowercase_ : List[Any] = initializer_range
lowercase_ : Union[str, Any] = num_labels
lowercase_ : Any = num_choices
lowercase_ : Optional[int] = relative_attention
lowercase_ : Tuple = position_biased_input
lowercase_ : List[str] = pos_att_type
lowercase_ : Any = scope
def SCREAMING_SNAKE_CASE_ ( self : str ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self : str ):
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        config = self.get_config()
        config.vocab_size = 300
        return config
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Dict ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Dict , lowercase_ : Tuple ):
lowercase_ : str = DebertaModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : str = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ )[0]
lowercase_ : Any = model(lowercase_ , token_type_ids=lowercase_ )[0]
lowercase_ : Any = model(lowercase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Dict , lowercase_ : Any , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : str ):
lowercase_ : Any = DebertaForMaskedLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Tuple = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Dict , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : int ):
lowercase_ : List[Any] = self.num_labels
lowercase_ : Optional[Any] = DebertaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Optional[Any] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : str ):
lowercase_ : List[Any] = self.num_labels
lowercase_ : List[Any] = DebertaForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Union[str, Any] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Dict ):
lowercase_ : Any = DebertaForQuestionAnswering(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : int = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = (
{
'''feature-extraction''': DebertaModel,
'''fill-mask''': DebertaForMaskedLM,
'''question-answering''': DebertaForQuestionAnswering,
'''text-classification''': DebertaForSequenceClassification,
'''token-classification''': DebertaForTokenClassification,
'''zero-shot''': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Optional[int] = DebertaModelTester(self )
lowercase_ : Any = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Any = DebertaModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase):
@unittest.skip(reason="""Model not available yet""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
pass
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Optional[int] = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
lowercase_ : Union[str, Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
lowercase_ : int = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase_ : int = model(lowercase_ , attention_mask=lowercase_ )[0]
# compare the actual values for a slice.
lowercase_ : str = torch.tensor(
[[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowercase_ , atol=1E-4 ) , f'''{output[:, 1:4, 1:4]}''' )
| 360 |
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def lowerCamelCase ( UpperCAmelCase__ : Optional[int] ) -> List[str]:
if isinstance(UpperCAmelCase__ , collections.abc.Iterable ):
return x
return (x, x)
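# e.g. an int image size of 224 becomes (224, 224), while an already-iterable
# size such as (224, 196) passes through unchanged.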
@require_flax
class __magic_name__ :
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any , lowercase_ : str ):
pass
def SCREAMING_SNAKE_CASE_ ( self : str ):
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float ):
        diff = np.abs(a - b).max()
self.assertLessEqual(lowercase_ , lowercase_ , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Tuple=None , **lowercase_ : Optional[int] ):
lowercase_ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ )
lowercase_ : Any = FlaxVisionTextDualEncoderModel(lowercase_ )
lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : List[Any]=None , **lowercase_ : Tuple ):
        vision_model , text_model = self.get_vision_text_model(lowercase_ , lowercase_ )
lowercase_ : Optional[int] = {"""vision_model""": vision_model, """text_model""": text_model}
lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ )
lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Optional[Any]=None , **lowercase_ : int ):
        vision_model , text_model = self.get_vision_text_model(lowercase_ , lowercase_ )
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
        out_1 = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff , 1E-3 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Dict=None , **lowercase_ : Optional[Any] ):
        vision_model , text_model = self.get_vision_text_model(lowercase_ , lowercase_ )
lowercase_ : Dict = {"""vision_model""": vision_model, """text_model""": text_model}
lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ )
lowercase_ : Optional[int] = model(
input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , output_attentions=lowercase_ )
lowercase_ : Tuple = output.vision_model_output.attentions
self.assertEqual(len(lowercase_ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase_ : List[str] = to_atuple(vision_model.config.image_size )
lowercase_ : Optional[Any] = to_atuple(vision_model.config.patch_size )
lowercase_ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowercase_ : Optional[Any] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowercase_ : Union[str, Any] = output.text_model_output.attentions
self.assertEqual(len(lowercase_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : int ):
pt_model.to(lowercase_ )
pt_model.eval()
# prepare inputs
lowercase_ : int = inputs_dict
lowercase_ : Tuple = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
lowercase_ : str = pt_model(**lowercase_ ).to_tuple()
lowercase_ : Optional[Any] = fx_model(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowercase_ )
lowercase_ : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ , from_pt=lowercase_ )
lowercase_ : Dict = fx_model_loaded(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowercase_ )
lowercase_ : Union[str, Any] = VisionTextDualEncoderModel.from_pretrained(lowercase_ , from_flax=lowercase_ )
pt_model_loaded.to(lowercase_ )
pt_model_loaded.eval()
with torch.no_grad():
lowercase_ : List[Any] = pt_model_loaded(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowercase_ , pt_output_loaded.numpy() , 4E-2 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Union[str, Any] ):
lowercase_ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ )
lowercase_ : List[Any] = VisionTextDualEncoderModel(lowercase_ )
lowercase_ : Union[str, Any] = FlaxVisionTextDualEncoderModel(lowercase_ )
lowercase_ : Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase_ )
lowercase_ : Tuple = fx_state
self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[Any] ):
lowercase_ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ )
lowercase_ : int = VisionTextDualEncoderModel(lowercase_ )
lowercase_ : Dict = FlaxVisionTextDualEncoderModel(lowercase_ )
lowercase_ : Optional[Any] = load_flax_weights_in_pytorch_model(lowercase_ , fx_model.params )
self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Tuple = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[Any] = self.prepare_config_and_inputs()
self.check_save_load(**lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Union[str, Any] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowercase_ )
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Tuple = self.prepare_config_and_inputs()
lowercase_ : List[Any] = config_inputs_dict.pop("""vision_config""" )
lowercase_ : int = config_inputs_dict.pop("""text_config""" )
lowercase_ : Optional[int] = config_inputs_dict
self.check_equivalence_pt_to_flax(lowercase_ , lowercase_ , lowercase_ )
self.check_equivalence_flax_to_pt(lowercase_ , lowercase_ , lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        model_2 , inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff , 1E-5 )
@require_flax
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , )
lowercase_ : List[str] = 13
lowercase_ : Optional[Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowercase_ : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
lowercase_ : str = random_attention_mask([batch_size, 4] )
lowercase_ : List[str] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Tuple ):
lowercase_ : Union[str, Any] = FlaxViTModel(lowercase_ )
lowercase_ : Dict = FlaxBertModel(lowercase_ )
return vision_model, text_model
def SCREAMING_SNAKE_CASE_ ( self : int ):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values = vision_config_and_inputs
        text_config , input_ids , token_type_ids , attention_mask = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , )
lowercase_ : List[str] = 13
lowercase_ : Optional[Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowercase_ : int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
lowercase_ : Tuple = random_attention_mask([batch_size, 4] )
lowercase_ : Union[str, Any] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] ):
lowercase_ : Tuple = FlaxCLIPVisionModel(lowercase_ )
lowercase_ : Any = FlaxBertModel(lowercase_ )
return vision_model, text_model
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values = vision_config_and_inputs
        text_config , input_ids , token_type_ids , attention_mask = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __magic_name__ ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
lowercase_ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
lowercase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowercase_ : Optional[int] = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=lowercase_ , padding=lowercase_ , return_tensors="""np""" )
lowercase_ : List[str] = model(**lowercase_ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
lowercase_ : Optional[Any] = np.array([[1.2_28_47_27, 0.3_10_41_22]] )
self.assertTrue(np.allclose(outputs.logits_per_image , lowercase_ , atol=1E-3 ) )
| 21 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__A : Dict = logging.get_logger(__name__)
class A_ (a_ ):
UpperCAmelCase__ = ['''pixel_values''']
def __init__( self , _A = True , _A = None , _A = PILImageResampling.BICUBIC , _A = True , _A = True , _A = 1 / 2_5_5 , _A = None , _A = True , _A = None , _A = None , **_A , ):
'''simple docstring'''
super().__init__(**_A )
UpperCAmelCase = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
UpperCAmelCase = get_size_dict(_A )
UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
UpperCAmelCase = get_size_dict(_A , default_to_square=_A , param_name='''crop_size''' )
UpperCAmelCase = do_resize
UpperCAmelCase = do_rescale
UpperCAmelCase = do_normalize
UpperCAmelCase = do_center_crop
UpperCAmelCase = crop_size
UpperCAmelCase = size
UpperCAmelCase = resample
UpperCAmelCase = rescale_factor
UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _lowercase ( self , _A , _A , _A = PILImageResampling.BILINEAR , _A = None , **_A , ):
'''simple docstring'''
UpperCAmelCase = get_size_dict(_A )
if "shortest_edge" in size:
UpperCAmelCase = get_resize_output_image_size(_A , size=size['''shortest_edge'''] , default_to_square=_A )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
UpperCAmelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(F"""Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}""" )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def _lowercase ( self , _A , _A , _A = None , **_A , ):
'''simple docstring'''
UpperCAmelCase = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def _lowercase ( self , _A , _A , _A = None , **_A ):
'''simple docstring'''
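        # e.g. with the default rescale_factor of 1/255 a uint8 pixel value of
        # 255 is mapped to 1.0, putting images in the [0, 1] range that the
        # normalize step expects.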
return rescale(_A , scale=_A , data_format=_A , **_A )
def _lowercase ( self , _A , _A , _A , _A = None , **_A , ):
'''simple docstring'''
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def _lowercase ( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = ChannelDimension.FIRST , **_A , ):
'''simple docstring'''
UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase = get_size_dict(_A , param_name='''crop_size''' , default_to_square=_A )
UpperCAmelCase = resample if resample is not None else self.resample
UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase = image_std if image_std is not None else self.image_std
UpperCAmelCase = size if size is not None else self.size
UpperCAmelCase = get_size_dict(_A )
if not is_batched(_A ):
UpperCAmelCase = [images]
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase = [to_numpy_array(_A ) for image in images]
if do_resize:
UpperCAmelCase = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_center_crop:
UpperCAmelCase = [self.center_crop(image=_A , size=_A ) for image in images]
if do_rescale:
UpperCAmelCase = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
UpperCAmelCase = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
UpperCAmelCase = [to_channel_dimension_format(_A , _A ) for image in images]
UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
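# Minimal usage sketch (an assumption-laden illustration: `A_` is the dataset's
# obfuscated alias for the image processor above, and calling the instance
# relies on the obfuscated `_lowercase` methods being restored to their
# upstream names such as `preprocess`):
#
#     import numpy as np
#     from PIL import Image
#
#     processor = A_()
#     image = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
#     batch = processor(image, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 224, 224)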
| 273 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class A_ :
def __init__( self , _A , _A=1_4 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=True , _A=9_9 , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=1_6 , _A=2 , _A=0.02 , _A=3 , _A=4 , _A=None , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_input_mask
UpperCAmelCase = use_labels
UpperCAmelCase = use_mc_token_ids
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
UpperCAmelCase = self.vocab_size - 1
def _lowercase ( self ):
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _lowercase ( self ):
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def _lowercase ( self , _A , _A , _A , _A , _A , *_A ):
'''simple docstring'''
UpperCAmelCase = CTRLModel(config=_A )
model.to(_A )
model.eval()
model(_A , token_type_ids=_A , head_mask=_A )
model(_A , token_type_ids=_A )
UpperCAmelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def _lowercase ( self , _A , _A , _A , _A , _A , *_A ):
'''simple docstring'''
UpperCAmelCase = CTRLLMHeadModel(_A )
model.to(_A )
model.eval()
UpperCAmelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict
def _lowercase ( self , _A , _A , _A , _A , *_A ):
'''simple docstring'''
UpperCAmelCase = self.num_labels
UpperCAmelCase = CTRLForSequenceClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class A_ (a_ , a_ , a_ , unittest.TestCase ):
UpperCAmelCase__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
    def _lowercase ( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = CTRLModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=_A , n_embd=3_7 )
def _lowercase ( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*_A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_A )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowercase ( self ):
'''simple docstring'''
pass
@slow
def _lowercase ( self ):
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = CTRLModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def _lowercase ( self ):
'''simple docstring'''
pass
@require_torch
class A_ (unittest.TestCase ):
def _lowercase ( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(_A )
UpperCAmelCase = torch.tensor(
[[1_1_8_5_9, 0, 1_6_1_1, 8]] , dtype=torch.long , device=_A ) # Legal the president is
UpperCAmelCase = [
1_1_8_5_9,
0,
1_6_1_1,
8,
5,
1_5_0,
2_6_4_4_9,
2,
1_9,
3_4_8,
4_6_9,
3,
2_5_9_5,
4_8,
2_0_7_4_0,
2_4_6_5_3_3,
2_4_6_5_3_3,
1_9,
3_0,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
UpperCAmelCase = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].tolist() , _A )
| 273 | 1 |
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element (1-indexed); assumes distinct elements."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
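if __name__ == "__main__":
    # Hedged usage sketch (values chosen for illustration, not from the source);
    # the partition drops duplicates of the pivot, so inputs are assumed distinct.
    # sorted([3, 1, 4, 5, 9, 2, 6]) == [1, 2, 3, 4, 5, 6, 9], so k=3 -> 3.
    assert kth_number([3, 1, 4, 5, 9, 2, 6], 3) == 3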
| 352 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowerCamelCase__ = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
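# Hedged consumer sketch (assumes the `transformers` package with torch installed,
# so the model symbols listed in _import_structure above actually exist):
#
#     from transformers import GPTNeoXForCausalLM  # resolved lazily by _LazyModule
#
# Nothing heavy is imported until the attribute above is first touched.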
| 63 | 0 |
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` inside [a, b] by repeated interval halving."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # iterate until the interval is narrower than 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 10_00))
import doctest
doctest.testmod()
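if __name__ == "__main__":
    # Hedged sanity check (root value hand-computed, treat as an assumption):
    # the real root of x**3 - 2*x - 5 is approximately 2.0945515.
    assert abs(bisection(f, 1, 1000) - 2.0945515) < 1e-6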
| 50 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
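# A minimal usage sketch of the resnet block above (shapes, PRNG seed, and
# time-embedding width are illustrative assumptions, not values from the source):
if __name__ == "__main__":
    block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
    x = jnp.zeros((1, 8, 8, 32))  # NHWC layout, as the convolutions assume
    temb = jnp.zeros((1, 128))  # time embedding; input width is inferred by nn.Dense
    params = block.init(jax.random.PRNGKey(0), x, temb)
    out = block.apply(params, x, temb)
    assert out.shape == (1, 8, 8, 64)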
| 50 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
@slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 138 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__magic_name__: Tuple = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: Dict = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: int = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
__magic_name__: Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 138 | 1 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text between `start_prompt` and `end_prompt` in `filename`, skipping empty lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def get_model_list_for_task(task_guide):
    """Build the markdown list of models that support `task_guide`."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Compare the generated model list with the one in the task guide, and optionally rewrite it."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
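# For reference, a hedged sketch of the block this script maintains inside a task
# guide (the model list shown is illustrative, not actual generated output):
#
#     <!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->
#     [BERT](../model_doc/bert), [GPT-2](../model_doc/gpt2)
#     <!--End of the generated tip-->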
| 285 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowercase ( lowercase_ ):
@staticmethod
@abstractmethod
def a ( snake_case ):
raise NotImplementedError()
@abstractmethod
def a ( self ):
raise NotImplementedError()
| 285 | 1 |
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
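# A standalone sketch of the denoising loop the tests above exercise (the
# zero-valued "model" and tensor shapes are illustrative assumptions):
def _kdpm2_usage_sketch():
    scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1100)
    scheduler.set_timesteps(10)
    sample = torch.ones(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for a real UNet
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample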
| 282 |
import argparse
import struct
import unittest
class SHA256:
    """Class to contain the entire pipeline for SHA-256 hashing."""

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
0X6_a_0_9_e_6_6_7,
0Xb_b_6_7_a_e_8_5,
0X3_c_6_e_f_3_7_2,
0Xa_5_4_f_f_5_3_a,
0X5_1_0_e_5_2_7_f,
0X9_b_0_5_6_8_8_c,
0X1_f_8_3_d_9_a_b,
0X5_b_e_0_c_d_1_9,
]
# Initialize round constants
        self.round_constants = [
0X4_2_8_a_2_f_9_8,
0X7_1_3_7_4_4_9_1,
0Xb_5_c_0_f_b_c_f,
0Xe_9_b_5_d_b_a_5,
0X3_9_5_6_c_2_5_b,
0X5_9_f_1_1_1_f_1,
0X9_2_3_f_8_2_a_4,
0Xa_b_1_c_5_e_d_5,
0Xd_8_0_7_a_a_9_8,
0X1_2_8_3_5_b_0_1,
0X2_4_3_1_8_5_b_e,
0X5_5_0_c_7_d_c_3,
0X7_2_b_e_5_d_7_4,
0X8_0_d_e_b_1_f_e,
0X9_b_d_c_0_6_a_7,
0Xc_1_9_b_f_1_7_4,
0Xe_4_9_b_6_9_c_1,
0Xe_f_b_e_4_7_8_6,
0X0_f_c_1_9_d_c_6,
0X2_4_0_c_a_1_c_c,
0X2_d_e_9_2_c_6_f,
0X4_a_7_4_8_4_a_a,
0X5_c_b_0_a_9_d_c,
0X7_6_f_9_8_8_d_a,
0X9_8_3_e_5_1_5_2,
0Xa_8_3_1_c_6_6_d,
0Xb_0_0_3_2_7_c_8,
0Xb_f_5_9_7_f_c_7,
0Xc_6_e_0_0_b_f_3,
0Xd_5_a_7_9_1_4_7,
0X0_6_c_a_6_3_5_1,
0X1_4_2_9_2_9_6_7,
0X2_7_b_7_0_a_8_5,
0X2_e_1_b_2_1_3_8,
0X4_d_2_c_6_d_f_c,
0X5_3_3_8_0_d_1_3,
0X6_5_0_a_7_3_5_4,
0X7_6_6_a_0_a_b_b,
0X8_1_c_2_c_9_2_e,
0X9_2_7_2_2_c_8_5,
0Xa_2_b_f_e_8_a_1,
0Xa_8_1_a_6_6_4_b,
0Xc_2_4_b_8_b_7_0,
0Xc_7_6_c_5_1_a_3,
0Xd_1_9_2_e_8_1_9,
0Xd_6_9_9_0_6_2_4,
0Xf_4_0_e_3_5_8_5,
0X1_0_6_a_a_0_7_0,
0X1_9_a_4_c_1_1_6,
0X1_e_3_7_6_c_0_8,
0X2_7_4_8_7_7_4_c,
0X3_4_b_0_b_c_b_5,
0X3_9_1_c_0_c_b_3,
0X4_e_d_8_a_a_4_a,
0X5_b_9_c_c_a_4_f,
0X6_8_2_e_6_f_f_3,
0X7_4_8_f_8_2_e_e,
0X7_8_a_5_6_3_6_f,
0X8_4_c_8_7_8_1_4,
0X8_c_c_7_0_2_0_8,
0X9_0_b_e_f_f_f_a,
0Xa_4_5_0_6_c_e_b,
0Xb_e_f_9_a_3_f_7,
0Xc_6_7_1_7_8_f_2,
]
        self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        """Right rotate a 32-bit integer by the given number of bit positions."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    """Provide a 'string' or 'file' option to take input and print the SHA-256 hash."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()
    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
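# Hedged self-check against a well-known SHA-256 test vector (digest quoted from
# the public FIPS-180 "abc" example; treat the literal below as an assumption):
#
#     SHA256(b"abc").hash
#     == "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"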
| 282 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )
    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)
    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
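    # Hedged arithmetic check for the shape assertions above (hand-computed from
    # the tester defaults image_size=64, sr_ratios=[8, 4, 2, 1]): first block
    # seq_len = (64 // 4) ** 2 = 256 and the reduced key/value length is
    # (64 // (4 * 8)) ** 2 = 4, which is what the attention maps must report.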
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))
    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 172 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J/(mol*K)
def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Root-mean-square speed from the Maxwell-Boltzmann distribution (SI units: K, kg/mol)."""
if temperature < 0:
raise Exception('Temperature cannot be less than 0 K' )
if molar_mass <= 0:
raise Exception('Molar mass cannot be less than or equal to 0 kg/mol' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 0.028  # kg/mol for N2; the formula expects SI units
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
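    # Hedged worked check (hand arithmetic, treat as an assumption):
    # vrms = sqrt(3 * 8.3144598 * 300 / 0.028) = sqrt(267250.5) ~ 516.96 m/s,
    # in line with the commonly quoted ~517 m/s for N2 at room temperature.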
| 172 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
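# Hedged usage sketch (paths, lengths, and batch size are illustrative
# assumptions; `tokenizer` is any pretrained tokenizer instance):
#
#     ds = Seq2SeqDataset(tokenizer, data_dir="data", max_source_length=128,
#                         max_target_length=32, type_path="train")
#     loader = torch.utils.data.DataLoader(ds, batch_size=8, collate_fn=ds.collate_fn)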
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
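# Hedged worked example (hand-computed, not from the source):
# f1_score("new york city", "new york") -> common tokens {new, york}, so
# precision = 2/3, recall = 2/2 = 1, and F1 = 2 * (2/3) * 1 / ((2/3) + 1) = 0.8.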
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 267 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 267 | 1 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        _value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(_value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.0: 'pip install \"soundfile>=0.12.0\"'",
)
# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)
# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout.")
        kwargs["timeout"] = timeout
        try:
            return online_request(method, url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}")

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    worker_id_string = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker_id_string = re.sub(r"^gw", "", worker_id_string, 0, re.M)
    return int(worker_id_string)


def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
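# --- Hedged usage sketch (not from the original module): exercises the `offline`
# context manager defined above; assumes only the helpers in this file.
if __name__ == "__main__":
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        try:
            requests.get("https://huggingface.co")
        except requests.ConnectionError as err:
            print(f"Simulated offline failure: {err}")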
| 87 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 190 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=None , cache_dir=tmpdirname )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname , os.listdir(tmpdirname )[0] , 'snapshots' ) )]
            files = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> Union[str, Any]:
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=None )
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE = 4
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(lowerCAmelCase__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = jax.random.split(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = shard(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 4.1514745 ) < 1e-3
            assert np.abs(np.abs(images , dtype=np.float32 ).sum() - 49947.875 ) < 5e-1
SCREAMING_SNAKE_CASE = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCAmelCase__ ) == num_samples
def __A ( self ) -> List[Any]:
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=None )
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(lowerCAmelCase__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = jax.random.split(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = shard(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.05652401) ) < 1e-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2383808.2) ) < 5e-1
def __A ( self ) -> Union[str, Any]:
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloat16 , safety_checker=None )
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(lowerCAmelCase__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = jax.random.split(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = shard(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.04003906) ) < 1e-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2373516.75) ) < 5e-1
def __A ( self ) -> List[str]:
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloat16 )
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(lowerCAmelCase__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = jax.random.split(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = shard(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.04003906) ) < 1e-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2373516.75) ) < 5e-1
def __A ( self ) -> Dict:
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=False , steps_offset=1 , )
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloat16 , scheduler=scheduler , safety_checker=None , )
        scheduler_state = scheduler.create_state()
        params['scheduler'] = scheduler_state
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(lowerCAmelCase__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = jax.random.split(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = shard(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.045043945) ) < 1e-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2347693.5) ) < 5e-1
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = jax.random.split(jax.random.PRNGKey(0 ) , lowerCAmelCase__ )
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloat16 , safety_checker=None , )
SCREAMING_SNAKE_CASE = replicate(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = shard(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloat16 , safety_checker=None , use_memory_efficient_attention=True , )
SCREAMING_SNAKE_CASE = replicate(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = shard(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__ ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
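# --- Hedged sketch (not from the original file): the replicate/shard pattern the
# tests above rely on, reduced to plain JAX. Left commented out so the test module
# does not execute it on import; names are illustrative only.
#
# import jax
# import jax.numpy as jnp
# import numpy as np
#
# def shard_for_pmap(batch):
#     # Split the leading batch axis across all local devices for jax.pmap.
#     n = jax.device_count()
#     assert batch.shape[0] % n == 0, "batch must divide evenly across devices"
#     return jnp.asarray(batch.reshape(n, batch.shape[0] // n, *batch.shape[1:]))
#
# sharded = shard_for_pmap(np.arange(8.0).reshape(8, 1))
# print(jax.pmap(lambda v: v * 2)(sharded).shape)  # (device_count, per_device, 1)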
| 38 |
"""simple docstring"""
class MaxFenwickTree:
    '''Fenwick-tree-like structure for point updates and range max queries.'''

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # Node covers a single element: store the value directly.
                self.tree[index] = value
            else:
                self.tree[index] = max(value, self.tree[index])
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
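    # --- Hedged usage sketch (not from the original file): assumes values are
    # only ever increased, the regime this max-Fenwick variant handles correctly.
    demo = MaxFenwickTree(5)
    demo.update(1, 7)
    demo.update(3, 4)
    print(demo.query(0, 5))  # expected: 7
    print(demo.query(2, 4))  # expected: 4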
| 38 | 1 |
"""simple docstring"""
def euclidean_distance_sqr(point1, point2):
    '''Squared Euclidean distance between two 2-D points.'''
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    '''Brute force over all pairs; used for the small base cases.'''
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    '''Closest pair within the strip; at most 6 neighbours need checking.'''
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print('Distance:', closest_pair_of_points(points, len(points)))
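    # --- Hedged cross-check (not from the original file): validates the
    # divide-and-conquer answer against an O(n^2) brute force on the same points.
    import itertools

    brute = min(euclidean_distance_sqr(p, q) for p, q in itertools.combinations(points, 2)) ** 0.5
    assert abs(brute - closest_pair_of_points(points, len(points))) < 1e-9
    print('Brute-force check passed:', brute)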
| 54 |
"""simple docstring"""
def catalan_numbers(upper_limit: int) -> list:
    '''Return the Catalan numbers C(0) .. C(upper_limit) via dynamic programming.'''
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
            N = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"The Catalan numbers from 0 through {N} are:")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
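    # --- Hedged cross-check (not from the original file): the closed form
    # C(n) = comb(2n, n) // (n + 1) should agree with the DP table above.
    from math import comb

    table = catalan_numbers(10)
    assert all(table[n] == comb(2 * n, n) // (n + 1) for n in range(11))
    print('Closed-form check passed:', table)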
| 54 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
UpperCAmelCase: Any = logging.getLogger(__name__)
@dataclass
class UpperCamelCase :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default=snake_case , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default=snake_case , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default=snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
SCREAMING_SNAKE_CASE_ : bool = field(default=snake_case , metadata={"help": "Whether tp freeze the encoder."} )
SCREAMING_SNAKE_CASE_ : bool = field(default=snake_case , metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class UpperCamelCase :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
SCREAMING_SNAKE_CASE_ : Optional[int] = field(
default=1_0_2_4 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[int] = field(
default=1_2_8 , metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[int] = field(
default=1_4_2 , metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[int] = field(
default=1_4_2 , metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
SCREAMING_SNAKE_CASE_ : Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
SCREAMING_SNAKE_CASE_ : Optional[int] = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(default=snake_case , metadata={"help": "Source language id for translation."} )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(default=snake_case , metadata={"help": "Target language id for translation."} )
SCREAMING_SNAKE_CASE_ : Optional[int] = field(default=snake_case , metadata={"help": "# num_beams to use for evaluation."} )
SCREAMING_SNAKE_CASE_ : bool = field(
default=snake_case , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def handle_metrics(split, metrics, output_dir):
    logger.info(F"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(F"""  {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , F"""{split}_results.json""" ) )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )

    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , __UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )

    extra_model_params = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(config , p , getattr(training_args , p ) )

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
    # use task specific params
    use_task_specific_params(model , data_args.task )

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )

    if model_args.freeze_embeds:
        freeze_embeds(model )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    dataset_class = SeqaSeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=SeqaSeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("""*** Train ***""" )

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("""train""" , metrics , training_args.output_dir )
            all_metrics.update(metrics )

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )

        metrics = trainer.evaluate(metric_key_prefix="""val""" )
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["""val_loss"""] , 4 )

        if trainer.is_world_process_zero():
            handle_metrics("""val""" , metrics , training_args.output_dir )
            all_metrics.update(metrics )

    if training_args.do_predict:
        logger.info("""*** Predict ***""" )

        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix="""test""" )
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["""test_loss"""] , 4 )
            handle_metrics("""test""" , metrics , training_args.output_dir )
            all_metrics.update(metrics )

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , """test_generations.txt""" ) )

    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , """all_results.json""" ) )

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
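# --- Hedged usage sketch (not from the original script): a hypothetical CLI
# invocation kept in comments so import stays side-effect free; model name and
# paths are illustrative only.
#   python finetune_trainer.py \
#     --model_name_or_path sshleifer/tiny-mbart \
#     --data_dir path/to/data --output_dir output \
#     --do_train --task translation \
#     --src_lang en_XX --tgt_lang ro_RO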
| 336 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase: Tuple = logging.get_logger(__name__)
UpperCAmelCase: List[Any] = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    """Configuration class for TrajectoryTransformer models."""

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self ,vocab_size=100 ,action_weight=5 ,reward_weight=1 ,value_weight=1 ,block_size=249 ,action_dim=6 ,observation_dim=17 ,transition_dim=25 ,n_layer=4 ,n_head=4 ,n_embd=128 ,embd_pdrop=0.1 ,attn_pdrop=0.1 ,resid_pdrop=0.1 ,learning_rate=0.0006 ,max_position_embeddings=512 ,initializer_range=0.02 ,layer_norm_eps=1E-12 ,kaiming_initializer_range=1 ,use_cache=True ,pad_token_id=1 ,bos_token_id=50256 ,eos_token_id=50256 ,**kwargs ,):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
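# --- Hedged usage sketch (not from the original file): shows the
# `attribute_map` aliases defined above resolving to the GPT-style names.
if __name__ == "__main__":
    cfg = TrajectoryTransformerConfig(n_layer=2, n_head=2)
    print(cfg.num_hidden_layers, cfg.hidden_size)  # -> 2 128, via attribute_map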
| 336 | 1 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_base_extractors(
    compression_format, is_archive, bz2_file, gz_file, lz4_file, seven_zip_file, tar_file, xz_file, zip_file, zstd_file, tmp_path, text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_extractor(
    compression_format, is_archive, bz2_file, gz_file, lz4_file, seven_zip_file, tar_file, xz_file, zip_file, zstd_file, tmp_path, text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, extractor_format, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def test_tar_extract_insecurely_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have fewer false positives than `zipfile.is_zipfile`
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
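# --- Hedged usage sketch (not from the original tests): the Extractor facade
# dispatch pattern exercised above, kept in comments; the archive path is
# hypothetical.
#   fmt = Extractor.infer_extractor_format("path/to/archive.tar.gz")
#   if fmt is not None:
#       Extractor.extract("path/to/archive.tar.gz", fmt, "extracted_dir")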
| 146 |
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def _a ( SCREAMING_SNAKE_CASE : str = "dhaka" , SCREAMING_SNAKE_CASE : int = 5 ):
"""simple docstring"""
UpperCamelCase__ : Optional[int] = min(SCREAMING_SNAKE_CASE , 50 ) # Prevent abuse!
UpperCamelCase__ : str = {
'''q''': query,
'''tbm''': '''isch''',
'''hl''': '''en''',
'''ijn''': '''0''',
}
UpperCamelCase__ : List[str] = requests.get('''https://www.google.com/search''' , params=SCREAMING_SNAKE_CASE , headers=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = BeautifulSoup(html.text , '''html.parser''' )
UpperCamelCase__ : Union[str, Any] = ''''''.join(
re.findall(r'''AF_initDataCallback\(([^<]+)\);''' , str(soup.select('''script''' ) ) ) )
UpperCamelCase__ : Optional[Any] = json.dumps(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = json.loads(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = re.findall(
r'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''' , SCREAMING_SNAKE_CASE , )
if not matched_google_image_data:
return 0
UpperCamelCase__ : Optional[Any] = re.sub(
r'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''' , '''''' , str(SCREAMING_SNAKE_CASE ) , )
UpperCamelCase__ : List[Any] = re.findall(
r'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''' , SCREAMING_SNAKE_CASE , )
for index, fixed_full_res_image in enumerate(SCREAMING_SNAKE_CASE ):
if index >= max_images:
return index
UpperCamelCase__ : Optional[int] = bytes(SCREAMING_SNAKE_CASE , '''ascii''' ).decode(
'''unicode-escape''' )
UpperCamelCase__ : List[Any] = bytes(SCREAMING_SNAKE_CASE , '''ascii''' ).decode(
'''unicode-escape''' )
UpperCamelCase__ : List[Any] = urllib.request.build_opener()
UpperCamelCase__ : Optional[Any] = [
(
'''User-Agent''',
'''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
)
]
urllib.request.install_opener(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = F"query_{query.replace(' ' , '_' )}"
if not os.path.exists(SCREAMING_SNAKE_CASE ):
os.makedirs(SCREAMING_SNAKE_CASE )
urllib.request.urlretrieve( # noqa: S310
SCREAMING_SNAKE_CASE , F"{path_name}/original_size_img_{index}.jpg" )
return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(f"{image_count} images were downloaded to disk.")
except IndexError:
print("Please provide a search term.")
raise
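
# Illustrative worked example of the double "unicode-escape" decode used above
# (the value of `s` is hypothetical; real page data differs):
#   >>> s = "https\\\\u003a//example.com"                  # doubly escaped in the page source
#   >>> once = bytes(s, "ascii").decode("unicode-escape")  # -> "https\\u003a//example.com"
#   >>> bytes(once, "ascii").decode("unicode-escape")      # -> "https://example.com"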
| 146 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
        eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
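        # Note: the slice comparison above verifies the KV-cache contract — feeding the full
        # extended sequence in one pass and feeding only the new tokens plus `past_key_values`
        # must agree (within rtol) on the logits produced for the new positions.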
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2,
            ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]])
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ])
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        expected_outputs = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, expected_outputs)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64))
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        expected_outputs = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, expected_outputs)
| 230 |
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
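
# Example invocation of the subcommand registered above (prints environment info for bug reports):
#   $ diffusers-cli env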
| 230 | 1 |
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
| 244 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """Object detection pipeline predicting bounding boxes and their classes."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1_000),
                            (height * bbox[1] / 1_000),
                            (width * bbox[2] / 1_000),
                            (height * bbox[3] / 1_000),
                        ]))

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]
        return annotation
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
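
# Usage sketch via the pipeline factory (model name and output values illustrative):
#   >>> from transformers import pipeline
#   >>> detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   >>> detector("cats.png", threshold=0.9)
#   [{'score': 0.99, 'label': 'cat', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, ...]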
| 111 | 0 |
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n                \"ner_tags\": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            \"O\",\n                            \"B-PER\",\n                            \"I-PER\",\n                            \"B-ORG\",\n                            \"I-ORG\",\n                            \"B-LOC\",\n                            \"I-LOC\",\n                        ]\n                    )\n                ),\n                \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n                \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, \"r\", encoding=\"utf-8\") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name() -> str:
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code() -> str:
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path) -> str:
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
| 332 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
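
# The dummy-object pattern above lets the package import cleanly without `note_seq`
# installed; the failure is deferred to first use. Roughly (illustrative message):
#   >>> MidiProcessor()        # without note_seq installed
#   ImportError: MidiProcessor requires the note_seq library ...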
| 332 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine."


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
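
# Typical CLI invocation once `accelerate` is installed (writes the YAML produced by the prompts):
#   $ accelerate config
#   $ accelerate config --config_file path/to/default_config.yaml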
| 292 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True,
        use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel values and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0_803, -0.4_454, -0.2_375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 292 | 1 |
def factorial(num: int) -> int:
    """Return num!."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Return the sum of the decimal digits of `number`."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20)."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
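
# Worked example: factorial(10) == 3628800 and 3+6+2+8+8+0+0 == 27, so solution(10) == 27.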
| 366 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
UpperCamelCase__ = None
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = '▁'
UpperCamelCase__ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase__ = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
UpperCamelCase__ = {
'google/pegasus-xsum': 5_1_2,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    """Fast Pegasus tokenizer, backed by HuggingFace *tokenizers*."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, vocab_file=None, tokenizer_file=None, pad_token="<pad>", eos_token="</s>", unk_token="<unk>",
        mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, pad_token=pad_token, eos_token=eos_token,
            unk_token=unk_token, mask_token=mask_token, mask_token_sent=mask_token_sent, offset=offset,
            additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is special and [0] otherwise."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
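
# Usage sketch (checkpoint taken from this file's own pretrained map; output shown schematically):
#   >>> tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#   >>> tok("Summarize this document.").input_ids   # sequence ends with tok.eos_token_id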
| 143 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
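
# With the lazy module installed, submodule attributes resolve on first access, e.g.
# (illustrative): `from transformers.models.convnext import ConvNextConfig` triggers
# the real import of `configuration_convnext` only at that point.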
| 159 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True,
        use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range, return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size],)

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],)

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 298 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
_snake_case = logging.get_logger(__name__)
_snake_case = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"
    def __init__(
        self, attention_window: Union[List[int], int] = 512, sep_token_id: int = 2, pad_token_id: int = 1,
        bos_token_id: int = 0, eos_token_id: int = 2, vocab_size: int = 30522, hidden_size: int = 768,
        num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072,
        hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512, type_vocab_size: int = 2, initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12, onnx_export: bool = False, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs
    @property
    def atol_for_validation(self) -> float:
        return 1E-4
    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)
    def generate_dummy_inputs(self, preprocessor: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
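# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library): the dummy-input logic above
# marks every second token as global. With seq_length=6 the mask comes out as
# [[1, 0, 1, 0, 1, 0]]. The variable names below are hypothetical.
#
#     import torch
#     input_ids = torch.zeros((1, 6), dtype=torch.long)
#     global_attention_mask = torch.zeros_like(input_ids)
#     global_attention_mask[:, ::2] = 1
#     assert global_attention_mask.tolist() == [[1, 0, 1, 0, 1, 0]]
# ---------------------------------------------------------------------------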
| 324 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key(k):
    '''simple docstring'''
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
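# Minimal sketch of the renaming above (the key is hypothetical, not from a
# real checkpoint). Replacements run in PATTERNS order, so one key can be
# rewritten several times:
#
#     >>> rename_state_dict_key('encoder/ffn.dense_1.kernel')
#     'encoder.fc2.weight'   # '/' -> '.', 'ffn.dense_1.' -> 'fc2.', 'kernel' -> 'weight'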
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    '''simple docstring'''
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    '''simple docstring'''
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    '''simple docstring'''
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
    parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('pegasus', dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
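# Example invocation (paths are placeholders; assumes this file is saved under
# its usual name, convert_pegasus_tf_to_pytorch.py):
#
#     python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 save_dirs/aeslc
#
# When save_dir is omitted, the script derives it from the checkpoint's parent
# directory name, writing to pegasus/aeslc in this example.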
| 324 | 1 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default='toto', metadata={'help': 'help message'})
@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    titi = 'titi'
    toto = 'toto'
class MixedTypeEnum(Enum):
    titi = 'titi'
    toto = 'toto'
    fourtytwo = 42
@dataclass
class EnumExample:
    foo: BasicEnum = 'toto'

    def __post_init__(self):
        """simple docstring"""
        self.foo = BasicEnum(self.foo)
@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = 'toto'

    def __post_init__(self):
        """simple docstring"""
        self.foo = MixedTypeEnum(self.foo)
@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={'help': 'help message'})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        """simple docstring"""
        self.required_enum = BasicEnum(self.required_enum)
@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default='toto', metadata={'help': 'help message'})
    foo_str: "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'])
if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={'help': 'help message'})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """simple docstring"""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != 'container'}
            yy = {k: v for k, v in vars(y).items() if k != 'container'}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('choices', None) and yy.get('choices', None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['type'](expected_choice), yy['type'](expected_choice))
                del xx["type"], yy["type"]
            self.assertEqual(xx, yy)
    def test_basic(self):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample)
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo', type=int, required=True)
        expected.add_argument('--bar', type=float, required=True)
        expected.add_argument('--baz', type=str, required=True)
        expected.add_argument('--flag', type=string_to_bool, default=False, const=True, nargs='?')
        self.argparsersEqual(parser, expected)
        args_string = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
        (example,) = parser.parse_args_into_dataclasses(args_string, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        """simple docstring"""
        parser = HfArgumentParser(WithDefaultExample)
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo', default=42, type=int)
        expected.add_argument('--baz', default='toto', type=str, help='help message')
        self.argparsersEqual(parser, expected)
    def test_with_default_bool(self):
        """simple docstring"""
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo', type=string_to_bool, default=False, const=True, nargs='?')
        expected.add_argument('--baz', type=string_to_bool, default=True, const=True, nargs='?')
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument('--no_baz', action='store_false', default=False, dest='baz')
        expected.add_argument('--opt', type=string_to_bool, default=None)
        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)
            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))
            args = parser.parse_args(['--foo', '--no_baz'])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))
            args = parser.parse_args(['--foo', '--baz'])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))
            args = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))
            args = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        """simple docstring"""
        parser = HfArgumentParser(MixedTypeEnumExample)
        expected = argparse.ArgumentParser()
        expected.add_argument(
            '--foo', default='toto', choices=['titi', 'toto', 42], type=make_choice_type_function(['titi', 'toto', 42]), )
        self.argparsersEqual(parser, expected)
        args = parser.parse_args([])
        self.assertEqual(args.foo, 'toto')
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)
        args = parser.parse_args(['--foo', 'titi'])
        self.assertEqual(args.foo, 'titi')
        enum_ex = parser.parse_args_into_dataclasses(['--foo', 'titi'])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)
        args = parser.parse_args(['--foo', '42'])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(['--foo', '42'])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)
    def test_with_literal(self):
        """simple docstring"""
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)
        expected = argparse.ArgumentParser()
        expected.add_argument(
            '--foo', default='toto', choices=('titi', 'toto', 42), type=make_choice_type_function(['titi', 'toto', 42]), )
        self.argparsersEqual(parser, expected)
        args = parser.parse_args([])
        self.assertEqual(args.foo, 'toto')
        args = parser.parse_args(['--foo', 'titi'])
        self.assertEqual(args.foo, 'titi')
        args = parser.parse_args(['--foo', '42'])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        """simple docstring"""
        parser = HfArgumentParser(ListExample)
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo_int', nargs='+', default=[], type=int)
        expected.add_argument('--bar_int', nargs='+', default=[1, 2, 3], type=int)
        expected.add_argument('--foo_str', nargs='+', default=['Hallo', 'Bonjour', 'Hello'], type=str)
        expected.add_argument('--foo_float', nargs='+', default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)
        args = parser.parse_args([])
        self.assertEqual(
            args, Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=['Hallo', 'Bonjour', 'Hello'], foo_float=[0.1, 0.2, 0.3]), )
        args = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=['a', 'b', 'c'], foo_float=[0.1, 0.7]))
    def test_with_optional(self):
        """simple docstring"""
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo', default=None, type=int)
        expected.add_argument('--bar', default=None, type=float, help='help message')
        expected.add_argument('--baz', default=None, type=str)
        expected.add_argument('--ces', nargs='+', default=[], type=str)
        expected.add_argument('--des', nargs='+', default=[], type=int)
        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)
            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))
            args = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz='42', ces=['a', 'b', 'c'], des=[1, 2, 3]))
    def test_with_required(self):
        """simple docstring"""
        parser = HfArgumentParser(RequiredExample)
        expected = argparse.ArgumentParser()
        expected.add_argument('--required_list', nargs='+', type=int, required=True)
        expected.add_argument('--required_str', type=str, required=True)
        expected.add_argument(
            '--required_enum', type=make_choice_type_function(['titi', 'toto']), choices=['titi', 'toto'], required=True, )
        self.argparsersEqual(parser, expected)
    def test_with_string_literal_annotation(self):
        """simple docstring"""
        parser = HfArgumentParser(StringLiteralAnnotationExample)
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo', type=int, required=True)
        expected.add_argument(
            '--required_enum', type=make_choice_type_function(['titi', 'toto']), choices=['titi', 'toto'], required=True, )
        expected.add_argument('--opt', type=string_to_bool, default=None)
        expected.add_argument('--baz', default='toto', type=str, help='help message')
        expected.add_argument('--foo_str', nargs='+', default=['Hallo', 'Bonjour', 'Hello'], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample)
        args_dict = {
            'foo': 12,
            'bar': 3.14,
            'baz': '42',
            'flag': True,
        }
        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)
    def test_parse_dict_extra_key(self):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample)
        args_dict = {
            'foo': 12,
            'bar': 3.14,
            'baz': '42',
            'flag': True,
            'extra': 42,
        }
        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
    def test_parse_json(self):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample)
        args_dict_for_json = {
            'foo': 12,
            'bar': 3.14,
            'baz': '42',
            'flag': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, 'temp_json')
            os.mkdir(temp_local_path)
            with open(temp_local_path + '.json', 'w+') as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + '.json'))[0]
        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)
    def test_parse_yaml(self):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample)
        args_dict_for_yaml = {
            'foo': 12,
            'bar': 3.14,
            'baz': '42',
            'flag': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, 'temp_yaml')
            os.mkdir(temp_local_path)
            with open(temp_local_path + '.yaml', 'w+') as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + '.yaml'))[0]
        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)
    def test_integration_training_args(self):
        """simple docstring"""
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
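# End-to-end sketch of the API under test, runnable on its own (values are
# arbitrary):
#
#     parser = HfArgumentParser(BasicExample)
#     (example,) = parser.parse_args_into_dataclasses(
#         ['--foo', '1', '--bar', '0.5', '--baz', 'quux', '--flag', 'true'],
#         look_for_args_file=False,
#     )
#     assert example == BasicExample(foo=1, bar=0.5, baz='quux', flag=True)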
| 21 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    sq = int(number**0.5)
    return number == sq * sq
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
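# Worked example (illustrative): combining 1/2 + 1/3 + 1/4 gives
# top = 1*3*4 + 1*2*4 + 1*2*3 = 26 and bottom = 2*3*4 = 24, with gcd 2, so
# add_three(1, 2, 1, 3, 1, 4) == (13, 12), i.e. the reduced fraction 13/12.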
def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"{solution() = }")
| 21 | 1 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
    parser.add_argument('''--vocab_size''', default=30522, type=int)
    args = parser.parse_args()
logger.info(f"""Loading data from {args.data_file}""")
with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
    counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
    counts = [0] * args.vocab_size
for k, v in counter.items():
        counts[k] = v
logger.info(f"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
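# Downstream sketch (assumed usage, not part of this script): reload the dump
# and turn the raw counts into smoothed sampling probabilities.
#
#     with open('data/token_counts.bert-base-uncased.pickle', 'rb') as fp:
#         counts = pickle.load(fp)
#     total = sum(counts)
#     probs = [c / total for c in counts]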
| 42 |
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet(hor: int):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
"down_block_types": down_block_types,
"block_out_channels": block_out_channels,
"up_block_types": up_block_types,
"layers_per_block": 1,
"use_timestep_embedding": True,
"out_block_type": "OutConv1DBlock",
"norm_num_groups": 8,
"downsample_each_block": False,
"in_channels": 14,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"flip_sin_to_cos": False,
"freq_shift": 1,
"sample_size": 65_536,
"mid_block_type": "MidResTemporalBlock1D",
"act_fn": "mish",
}
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
"in_channels": 14,
"down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
"up_block_types": (),
"out_block_type": "ValueFunction",
"mid_block_type": "ValueFunctionMidBlock1D",
"block_out_channels": (32, 64, 128, 256),
"layers_per_block": 1,
"downsample_each_block": True,
"sample_size": 65_536,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"use_timestep_embedding": True,
"flip_sin_to_cos": False,
"freq_shift": 1,
"norm_num_groups": 8,
"act_fn": "mish",
}
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
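# Loading the converted horizon-32 UNet back (sketch; assumes the files
# written by unet(32) above exist):
#
#     with open('hub/hopper-medium-v2/unet/hor32/config.json') as f:
#         config = json.load(f)
#     model = UNet1DModel(**config)
#     model.load_state_dict(
#         torch.load('hub/hopper-medium-v2/unet/hor32/diffusion_pytorch_model.bin'))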
| 42 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    '''simple docstring'''

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        """simple docstring"""
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
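# Minimal usage sketch (the column names below are hypothetical): remap a
# dataset whose columns differ from the canonical names.
#
#     task = QuestionAnsweringExtractive(
#         question_column='query', context_column='passage', answers_column='spans')
#     assert task.column_mapping == {
#         'query': 'question', 'passage': 'context', 'spans': 'answers'}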
| 61 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def dataset():
    n = 10
    features = datasets.Features(
        {
            'tokens': datasets.Sequence(datasets.Value('string')),
            'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'])),
            'answers': datasets.Sequence(
                {
                    'text': datasets.Value('string'),
                    'answer_start': datasets.Value('int32'),
                }
            ),
            'id': datasets.Value('int64'),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            'tokens': [['foo'] * 5] * n,
            'labels': [[1] * 5] * n,
            'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
            'id': list(range(n)),
        },
        features=features,
    )
    return dataset
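# Sketch of how a test would consume the fixture above (pytest injects
# fixtures by parameter name):
#
#     def test_dataset_fixture(dataset):
#         assert dataset.num_rows == 10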
@pytest.fixture(scope='session' )
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp('data') / 'file.arrow')
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
FILE_CONTENT = '\\n    Text data.\n    Second line of data.'
@pytest.fixture(scope='session' )
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp('data') / 'file.txt'
    data = FILE_CONTENT
    with open(filename, 'w') as f:
        f.write(data)
    return filename
@pytest.fixture(scope='session' )
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp('data') / 'file.txt.bz2'
    data = bytes(FILE_CONTENT, 'utf-8')
    with bz2.open(path, 'wb') as f:
        f.write(data)
    return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] ) -> Any:
import gzip
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
with gzip.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] ) -> int:
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
        with lz4.frame.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] ) -> Any:
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
        with py7zr.SevenZipFile(__UpperCAmelCase , 'w' ) as archive:
archive.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : str ) -> str:
import tarfile
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(__UpperCAmelCase , 'w' ) as f:
f.add(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> List[Any]:
import lzma
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
with lzma.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict ) -> str:
import zipfile
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict ) -> Any:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
with zstd.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : int ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.xml'
SCREAMING_SNAKE_CASE_ = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase )
return filename
DATA = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( ) -> Tuple:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = datasets.Dataset.from_dict(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(__UpperCAmelCase ) ) as con:
SCREAMING_SNAKE_CASE_ = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(__UpperCAmelCase , 'w' , newline='' ) as f:
SCREAMING_SNAKE_CASE_ = csv.DictWriter(__UpperCAmelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(__UpperCAmelCase , 'w' , newline='' ) as f:
SCREAMING_SNAKE_CASE_ = csv.DictWriter(__UpperCAmelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : Tuple ) -> str:
    import bz2
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(__UpperCAmelCase , 'rb' ) as f:
SCREAMING_SNAKE_CASE_ = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : List[str] , __UpperCAmelCase : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : str ) -> Any:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
SCREAMING_SNAKE_CASE_ = pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
} )
with open(__UpperCAmelCase , 'wb' ) as f:
SCREAMING_SNAKE_CASE_ = pq.ParquetWriter(__UpperCAmelCase , schema=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__UpperCAmelCase ) )] for k in DATA[0]} , schema=__UpperCAmelCase )
writer.write_table(__UpperCAmelCase )
writer.close()
return path
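# Illustrative read-back of the parquet file written above (pq is
# pyarrow.parquet, imported at the top of this module):
#
#     table = pq.read_table(path)
#     assert table.num_rows == 4
#     assert set(table.column_names) == {'col_1', 'col_2', 'col_3'}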
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple ) -> Any:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
SCREAMING_SNAKE_CASE_ = {'data': DATA}
with open(__UpperCAmelCase , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
SCREAMING_SNAKE_CASE_ = {'data': DATA_DICT_OF_LISTS}
with open(__UpperCAmelCase , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(__UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(__UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str] ) -> Union[str, Any]:
import gzip
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(__UpperCAmelCase , 'rb' ) as orig_file:
with gzip.open(__UpperCAmelCase , 'wb' ) as zipped_file:
zipped_file.writelines(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] ) -> List[str]:
import gzip
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(__UpperCAmelCase , 'rb' ) as orig_file:
with gzip.open(__UpperCAmelCase , 'wb' ) as zipped_file:
zipped_file.writelines(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.join('nested' , os.path.basename(__UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str ) -> Any:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(__UpperCAmelCase , 'w' ) as f:
f.add(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
f.add(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(__UpperCAmelCase , 'w' ) as f:
f.add(__UpperCAmelCase , arcname=os.path.join('nested' , os.path.basename(__UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = ['0', '1', '2', '3']
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict ) -> Any:
SCREAMING_SNAKE_CASE_ = ['0', '1', '2', '3']
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = ['0', '1', '2', '3']
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(__UpperCAmelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename('unsupported.ext' ) )
f.write(__UpperCAmelCase , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE_ = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( ) -> List[Any]:
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( ) -> Tuple:
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict ) -> int:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> Dict:
    data_dir = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
    return data_dir
| 225 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = '''ResNetConfig'''

# Base docstring
_CHECKPOINT_FOR_DOC = '''microsoft/resnet-50'''
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = '''microsoft/resnet-50'''
_IMAGE_CLASS_EXPECTED_OUTPUT = '''tiger cat'''

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    '''microsoft/resnet-50''',
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        '''simple docstring'''
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        '''simple docstring'''
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
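# Shape sketch (illustrative): padding=kernel_size // 2 preserves spatial size
# at stride 1 and halves it at stride 2, e.g.
#
#     layer = ResNetConvLayer(3, 64, kernel_size=7, stride=2)
#     out = layer(torch.randn(1, 3, 224, 224))  # -> (1, 64, 112, 112)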
class ResNetEmbeddings(nn.Module):
    def __init__(self, config: ResNetConfig):
        '''simple docstring'''
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act)
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        '''simple docstring'''
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''')
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        '''simple docstring'''
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        '''simple docstring'''
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        '''simple docstring'''
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride), ResNetConvLayer(out_channels, out_channels, activation=None), )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        '''simple docstring'''
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        '''simple docstring'''
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1), ResNetConvLayer(reduces_channels, reduces_channels, stride=stride), ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None), )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        '''simple docstring'''
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
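# Channel arithmetic (illustrative): with out_channels=256 and reduction=4 the
# residual branch runs 1x1 -> 64, 3x3 -> 64, 1x1 -> 256, so the expensive 3x3
# convolution operates on a representation four times narrower than the output.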
class ResNetStage(nn.Module):
    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, ):
        '''simple docstring'''
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act), *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)], )

    def forward(self, input: Tensor) -> Tensor:
        '''simple docstring'''
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class _A ( nn.Module ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : ResNetConfig):
'''simple docstring'''
super().__init__()
__a = nn.ModuleList([])
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
__SCREAMING_SNAKE_CASE , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ))
__a = zip(config.hidden_sizes , config.hidden_sizes[1:])
for (in_channels, out_channels), depth in zip(__SCREAMING_SNAKE_CASE , config.depths[1:]):
self.stages.append(ResNetStage(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , depth=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tensor , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = True):
'''simple docstring'''
__a = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__a = hidden_states + (hidden_state,)
__a = stage_module(__SCREAMING_SNAKE_CASE)
if output_hidden_states:
__a = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(
last_hidden_state=__SCREAMING_SNAKE_CASE , hidden_states=__SCREAMING_SNAKE_CASE , )
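# The class below appears to be the ResNet PreTrainedModel base: Kaiming-normal
# initialization for convolutions and constant initialization (weight=1, bias=0)
# for the normalization layers.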
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Any = ResNetConfig
UpperCamelCase__ : List[Any] = '''resnet'''
UpperCamelCase__ : List[Any] = '''pixel_values'''
UpperCamelCase__ : str = True
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE , nn.Convad):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''')
elif isinstance(__SCREAMING_SNAKE_CASE , (nn.BatchNormad, nn.GroupNorm)):
nn.init.constant_(module.weight , 1)
nn.init.constant_(module.bias , 0)
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any]=False):
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = value
__snake_case :Union[str, Any] = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__snake_case :List[str] = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
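# Bare model below: embedder -> encoder -> (1, 1) adaptive average pooling, returning
# both the last hidden state and a pooled output.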
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' ,__UpperCAmelCase ,)
class _A ( __UpperCAmelCase ):
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE)
__a = config
__a = ResNetEmbeddings(__SCREAMING_SNAKE_CASE)
__a = ResNetEncoder(__SCREAMING_SNAKE_CASE)
__a = nn.AdaptiveAvgPoolad((1, 1))
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Tensor , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None):
'''simple docstring'''
__a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a = return_dict if return_dict is not None else self.config.use_return_dict
__a = self.embedder(__SCREAMING_SNAKE_CASE)
__a = self.encoder(
__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE)
__a = encoder_outputs[0]
__a = self.pooler(__SCREAMING_SNAKE_CASE)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__SCREAMING_SNAKE_CASE , pooler_output=__SCREAMING_SNAKE_CASE , hidden_states=encoder_outputs.hidden_states , )
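# The classification variant below adds a Flatten + Linear head on the pooled
# features and, when labels are given, infers `problem_type` (regression,
# single-label, or multi-label) from the label dtype and `num_labels`.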
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' ,__UpperCAmelCase ,)
class _A ( __UpperCAmelCase ):
def __init__( self : str , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE)
__a = config.num_labels
__a = ResNetModel(__SCREAMING_SNAKE_CASE)
# classification head
__a = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[torch.LongTensor] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , ):
'''simple docstring'''
__a = return_dict if return_dict is not None else self.config.use_return_dict
__a = self.resnet(__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE)
__a = outputs.pooler_output if return_dict else outputs[1]
__a = self.classifier(__SCREAMING_SNAKE_CASE)
__a = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__a = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__a = '''single_label_classification'''
else:
__a = '''multi_label_classification'''
if self.config.problem_type == "regression":
__a = MSELoss()
if self.num_labels == 1:
__a = loss_fct(logits.squeeze() , labels.squeeze())
else:
__a = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
elif self.config.problem_type == "single_label_classification":
__a = CrossEntropyLoss()
__a = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
__a = BCEWithLogitsLoss()
__a = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
if not return_dict:
__a = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__SCREAMING_SNAKE_CASE , logits=__SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states)
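# The backbone variant below re-exposes the per-stage feature maps (filtered through
# `out_features`) so frameworks such as DETR and MaskFormer can consume them.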
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
''' ,__UpperCAmelCase ,)
class _A ( __UpperCAmelCase ,__UpperCAmelCase ):
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE)
super()._init_backbone(__SCREAMING_SNAKE_CASE)
__a = [config.embedding_size] + config.hidden_sizes
__a = ResNetEmbeddings(__SCREAMING_SNAKE_CASE)
__a = ResNetEncoder(__SCREAMING_SNAKE_CASE)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE)
@replace_return_docstrings(output_type=__SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC)
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Tensor , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None):
'''simple docstring'''
__a = return_dict if return_dict is not None else self.config.use_return_dict
__a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a = self.embedder(__SCREAMING_SNAKE_CASE)
__a = self.encoder(__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE)
__a = outputs.hidden_states
__a = ()
for idx, stage in enumerate(self.stage_names):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
__a = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=__SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=__SCREAMING_SNAKE_CASE , )
| 131 |
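# The test below appears to exercise DonutProcessor's token-to-JSON conversion: an
# XML-like tag sequence (<s_name>...</s_name>, <sep/>-separated lists) is parsed
# back into the nested dict it encodes.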
import unittest
from transformers import DonutProcessor
__snake_case :List[str] = '''naver-clova-ix/donut-base'''
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = DonutProcessor.from_pretrained(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
__a = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
__a = self.processor.tokenajson(__SCREAMING_SNAKE_CASE)
self.assertDictEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
| 131 | 1 |
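# The module below appears to test accelerate's gradient synchronization machinery:
# `no_sync` as a no-op on a single device, genuine gradient desync under DDP, the
# `accelerator.accumulate` wrapper, and GradientState dataloader tracking.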
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
for param, grad_param in zip(model_a.parameters(), model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=True ):
model.train()
UpperCAmelCase_ : int = model(__lowerCamelCase )
UpperCAmelCase_ : List[str] = F.mse_loss(__lowerCamelCase, target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(__lowerCamelCase )
def __a ( __lowerCamelCase, __lowerCamelCase=False ):
set_seed(42 )
UpperCAmelCase_ : Dict = RegressionModel()
UpperCAmelCase_ : Optional[Any] = deepcopy(__lowerCamelCase )
UpperCAmelCase_ : Tuple = RegressionDataset(length=80 )
UpperCAmelCase_ : List[Any] = DataLoader(__lowerCamelCase, batch_size=16 )
model.to(accelerator.device )
if sched:
UpperCAmelCase_ : Any = AdamW(params=model.parameters(), lr=1E-3 )
UpperCAmelCase_ : str = AdamW(params=ddp_model.parameters(), lr=1E-3 )
UpperCAmelCase_ : str = LambdaLR(__lowerCamelCase, lr_lambda=lambda __lowerCamelCase : epoch**0.65 )
UpperCAmelCase_ : List[str] = LambdaLR(__lowerCamelCase, lr_lambda=lambda __lowerCamelCase : epoch**0.65 )
# Make a copy of `model`
if sched:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = accelerator.prepare(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
else:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.prepare(__lowerCamelCase, __lowerCamelCase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def __a ( __lowerCamelCase ):
    # Test that, on a single CPU or GPU, the `no_sync` context manager is effectively a no-op
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = get_training_setup(__lowerCamelCase )
# Use a single batch
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = next(iter(__lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__lowerCamelCase ):
step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
else:
# Sync grads
step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad, ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCAmelCase_ : Optional[Any] = ddp_input[torch.randperm(len(__lowerCamelCase ) )]
def __a ( __lowerCamelCase ):
    # Test that, in a distributed setup, `no_sync` actually prevents gradient synchronization
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = get_training_setup(__lowerCamelCase )
# Use a single batch
UpperCAmelCase_ , UpperCAmelCase_ : int = next(iter(__lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__lowerCamelCase ):
step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
else:
# Sync grads
step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCAmelCase_ : Dict = ddp_input[torch.randperm(len(__lowerCamelCase ) )]
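# The next test drives the `accelerator.accumulate` wrapper: gradients should only
# sync on the step that closes an accumulation window (every 2nd batch here) or on
# the final batch of the dataloader.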
def __a ( __lowerCamelCase=False, __lowerCamelCase=False ):
UpperCAmelCase_ : Tuple = Accelerator(
split_batches=__lowerCamelCase, dispatch_batches=__lowerCamelCase, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = get_training_setup(__lowerCamelCase )
for iteration, batch in enumerate(__lowerCamelCase ):
UpperCAmelCase_ , UpperCAmelCase_ : int = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(__lowerCamelCase ):
step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(__lowerCamelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCAmelCase_ : int = ddp_input[torch.randperm(len(__lowerCamelCase ) )]
GradientState._reset_state()
def __a ( __lowerCamelCase=False, __lowerCamelCase=False ):
UpperCAmelCase_ : List[Any] = Accelerator(
split_batches=__lowerCamelCase, dispatch_batches=__lowerCamelCase, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = get_training_setup(__lowerCamelCase, __lowerCamelCase )
for iteration, batch in enumerate(__lowerCamelCase ):
UpperCAmelCase_ , UpperCAmelCase_ : str = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ : str = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__lowerCamelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(__lowerCamelCase ):
step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
UpperCAmelCase_ : str = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__lowerCamelCase ))
if accelerator.num_processes > 1:
check_model_parameters(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
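# The next test checks that GradientState tracks the active dataloader, including a
# nested inner loop, and that `end_of_dataloader` flips on the last batch only.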
def __a ( ):
UpperCAmelCase_ : Dict = Accelerator()
UpperCAmelCase_ : Tuple = RegressionDataset(length=80 )
UpperCAmelCase_ : str = DataLoader(__lowerCamelCase, batch_size=16 )
UpperCAmelCase_ : Optional[Any] = RegressionDataset(length=96 )
UpperCAmelCase_ : List[Any] = DataLoader(__lowerCamelCase, batch_size=16 )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = accelerator.prepare(__lowerCamelCase, __lowerCamelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(__lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCamelCase )
if iteration < len(__lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(__lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCamelCase )
if batch_num < len(__lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def __a ( ):
UpperCAmelCase_ : str = Accelerator()
UpperCAmelCase_ : int = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(__lowerCamelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(__lowerCamelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, ", f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""", )
test_gradient_accumulation(__lowerCamelCase, __lowerCamelCase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<", "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, ", "`split_batches=False`, `dispatch_batches=False`**", )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, ", f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""", )
test_gradient_accumulation_with_opt_and_scheduler(__lowerCamelCase, __lowerCamelCase )
def __a ( __lowerCamelCase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 61 |
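# The class below appears to be the Deformable DETR configuration: backbone selection
# (timm or a nested config), encoder/decoder dimensions, deformable-attention
# settings (feature levels, sampling points, two-stage proposals), and the Hungarian
# matcher costs plus loss coefficients.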
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ : str = logging.get_logger(__name__)
lowercase__ : Any = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Tuple = 'deformable_detr'
_snake_case : Dict = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Optional[Any] , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Dict=3 , lowerCAmelCase__ : List[str]=300 , lowerCAmelCase__ : Union[str, Any]=1024 , lowerCAmelCase__ : Tuple=6 , lowerCAmelCase__ : Union[str, Any]=1024 , lowerCAmelCase__ : List[Any]=8 , lowerCAmelCase__ : List[Any]=6 , lowerCAmelCase__ : Tuple=1024 , lowerCAmelCase__ : List[Any]=8 , lowerCAmelCase__ : Union[str, Any]=0.0 , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Any="relu" , lowerCAmelCase__ : int=256 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Tuple=0.0 , lowerCAmelCase__ : str=0.0 , lowerCAmelCase__ : int=0.02 , lowerCAmelCase__ : Any=1.0 , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : int=False , lowerCAmelCase__ : str="sine" , lowerCAmelCase__ : List[Any]="resnet50" , lowerCAmelCase__ : str=True , lowerCAmelCase__ : str=False , lowerCAmelCase__ : List[str]=4 , lowerCAmelCase__ : List[str]=4 , lowerCAmelCase__ : Optional[Any]=4 , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : Optional[int]=300 , lowerCAmelCase__ : int=False , lowerCAmelCase__ : Optional[Any]=1 , lowerCAmelCase__ : Dict=5 , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : Tuple=1 , lowerCAmelCase__ : Optional[Any]=1 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : Dict=2 , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : int=0.25 , lowerCAmelCase__ : Any=False , **lowerCAmelCase__ : Optional[Any] , ) -> str:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
_UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = backbone_config.get('''model_type''' )
_UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCamelCase = config_class.from_dict(lowerCAmelCase__ )
_UpperCamelCase = use_timm_backbone
_UpperCamelCase = backbone_config
_UpperCamelCase = num_channels
_UpperCamelCase = num_queries
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = d_model
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = encoder_layers
_UpperCamelCase = encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = init_xavier_std
_UpperCamelCase = encoder_layerdrop
_UpperCamelCase = auxiliary_loss
_UpperCamelCase = position_embedding_type
_UpperCamelCase = backbone
_UpperCamelCase = use_pretrained_backbone
_UpperCamelCase = dilation
# deformable attributes
_UpperCamelCase = num_feature_levels
_UpperCamelCase = encoder_n_points
_UpperCamelCase = decoder_n_points
_UpperCamelCase = two_stage
_UpperCamelCase = two_stage_num_proposals
_UpperCamelCase = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
_UpperCamelCase = class_cost
_UpperCamelCase = bbox_cost
_UpperCamelCase = giou_cost
# Loss coefficients
_UpperCamelCase = mask_loss_coefficient
_UpperCamelCase = dice_loss_coefficient
_UpperCamelCase = bbox_loss_coefficient
_UpperCamelCase = giou_loss_coefficient
_UpperCamelCase = eos_coefficient
_UpperCamelCase = focal_alpha
_UpperCamelCase = disable_custom_kernels
super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def snake_case__ ( self : List[str] ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def snake_case__ ( self : int ) -> int:
'''simple docstring'''
return self.d_model
def snake_case__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
_UpperCamelCase = self.backbone_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
| 324 | 0 |
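# The Manim scene below appears to animate the first stage of big-model loading:
# CPU, GPU and Model memory grids are drawn, a legend explains the colour coding,
# and "empty model" blocks stream from the model grid into the CPU region.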
from manim import *
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
def A_ ( self : Optional[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = Rectangle(height=0.5, width=0.5 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE__ : Tuple = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : str = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : int = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase, buff=0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase, buff=0 )
SCREAMING_SNAKE_CASE__ : List[str] = VGroup(_UpperCAmelCase, _UpperCAmelCase ).arrange(_UpperCAmelCase, buff=0 )
SCREAMING_SNAKE_CASE__ : Dict = Text("CPU", font_size=2_4 )
SCREAMING_SNAKE_CASE__ : List[str] = Group(_UpperCAmelCase, _UpperCAmelCase ).arrange(_UpperCAmelCase, buff=0.5, aligned_edge=_UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = [mem.copy() for i in range(1 )]
SCREAMING_SNAKE_CASE__ : Any = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase, buff=0 )
SCREAMING_SNAKE_CASE__ : int = Text("GPU", font_size=2_4 )
SCREAMING_SNAKE_CASE__ : Tuple = Group(_UpperCAmelCase, _UpperCAmelCase ).arrange(_UpperCAmelCase, buff=0.5, aligned_edge=_UpperCAmelCase )
gpu.align_to(_UpperCAmelCase, _UpperCAmelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : Optional[int] = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase, buff=0 )
SCREAMING_SNAKE_CASE__ : Any = Text("Model", font_size=2_4 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Group(_UpperCAmelCase, _UpperCAmelCase ).arrange(_UpperCAmelCase, buff=0.5, aligned_edge=_UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(_UpperCAmelCase, run_time=1 ), Create(_UpperCAmelCase, run_time=1 ), Create(_UpperCAmelCase, run_time=1 ), )
SCREAMING_SNAKE_CASE__ : str = MarkupText(
F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''', font_size=2_4, )
SCREAMING_SNAKE_CASE__ : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE__ : List[str] = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''', font_size=1_8, )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase, run_time=2.5 ), Write(_UpperCAmelCase ), Write(_UpperCAmelCase ) )
self.add(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : str = []
for i, rect in enumerate(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ : int = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase, opacity=0.7 )
cpu_target.move_to(_UpperCAmelCase )
cpu_target.generate_target()
SCREAMING_SNAKE_CASE__ : int = 0.46 / 4
SCREAMING_SNAKE_CASE__ : List[Any] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=_UpperCAmelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target, direction=_UpperCAmelCase, buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target, direction=_UpperCAmelCase, buff=0.0 )
cpu_targs.append(_UpperCAmelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_UpperCAmelCase ) )
second_animations.append(MoveToTarget(_UpperCAmelCase, run_time=1.5 ) )
self.play(*_UpperCAmelCase )
self.play(*_UpperCAmelCase )
self.wait()
| 191 |
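# The script below appears to compute first- and second-order Shannon entropy of a
# text over the alphabet {space, a-z}: H = -sum(p * log2(p)) over single characters,
# then over two-character sequences, and finally prints the difference.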
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def _a ( SCREAMING_SNAKE_CASE__ : str ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Dict = analyze_text(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = list(" " + ascii_lowercase )
    # total number of counted characters, used to normalize counts into probabilities
    SCREAMING_SNAKE_CASE__ : str = sum(single_char_strings.values() )
    # first-order (single-character) entropy accumulator
    SCREAMING_SNAKE_CASE__ : str = 0
    # for each character of the alphabet, add its entropy contribution if it occurs in the text
for ch in my_alphas:
if ch in single_char_strings:
SCREAMING_SNAKE_CASE__ : Optional[int] = single_char_strings[ch]
SCREAMING_SNAKE_CASE__ : Any = my_str / all_sum
my_fir_sum += prob * math.loga(SCREAMING_SNAKE_CASE__ ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
    # total number of counted two-character sequences
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = sum(two_char_strings.values() )
    SCREAMING_SNAKE_CASE__ : int = 0
    # for each ordered pair of alphabet characters, accumulate its entropy contribution;
    # two distinct loop variables are required so all pairs (not just doubled characters) are tested
    for cha in my_alphas:
        for chb in my_alphas:
            SCREAMING_SNAKE_CASE__ : List[str] = cha + chb
if sequence in two_char_strings:
SCREAMING_SNAKE_CASE__ : Optional[Any] = two_char_strings[sequence]
SCREAMING_SNAKE_CASE__ : Tuple = int(SCREAMING_SNAKE_CASE__ ) / all_sum
my_sec_sum += prob * math.loga(SCREAMING_SNAKE_CASE__ )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def _a ( SCREAMING_SNAKE_CASE__ : str ) -> tuple[dict, dict]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = Counter() # type: ignore
SCREAMING_SNAKE_CASE__ : Dict = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(SCREAMING_SNAKE_CASE__ ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def _a ( ) -> str:
'''simple docstring'''
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 191 | 1 |
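# The classes below appear to be the GPT-J configuration and its ONNX export config:
# the latter adds dummy `past_key_values` inputs (one zero-filled (key, value) pair
# per layer) and extends the attention mask to cover the past sequence length.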
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : List[str] = {
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class a__ ( snake_case_ ):
A = 'gptj'
A = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : str,_A : List[str]=5_0400,_A : str=2048,_A : int=4096,_A : str=28,_A : int=16,_A : List[str]=64,_A : Any=None,_A : Tuple="gelu_new",_A : str=0.0,_A : List[Any]=0.0,_A : List[str]=0.0,_A : Union[str, Any]=1E-5,_A : List[Any]=0.02,_A : Tuple=True,_A : Tuple=5_0256,_A : List[Any]=5_0256,_A : List[Any]=False,**_A : Optional[int],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = vocab_size
SCREAMING_SNAKE_CASE_ : int = n_positions
SCREAMING_SNAKE_CASE_ : Optional[int] = n_embd
SCREAMING_SNAKE_CASE_ : Dict = n_layer
SCREAMING_SNAKE_CASE_ : Dict = n_head
SCREAMING_SNAKE_CASE_ : List[str] = n_inner
SCREAMING_SNAKE_CASE_ : Union[str, Any] = rotary_dim
SCREAMING_SNAKE_CASE_ : List[Any] = activation_function
SCREAMING_SNAKE_CASE_ : Dict = resid_pdrop
SCREAMING_SNAKE_CASE_ : Optional[int] = embd_pdrop
SCREAMING_SNAKE_CASE_ : Union[str, Any] = attn_pdrop
SCREAMING_SNAKE_CASE_ : Optional[Any] = layer_norm_epsilon
SCREAMING_SNAKE_CASE_ : Tuple = initializer_range
SCREAMING_SNAKE_CASE_ : int = use_cache
SCREAMING_SNAKE_CASE_ : Optional[int] = bos_token_id
SCREAMING_SNAKE_CASE_ : Dict = eos_token_id
super().__init__(
bos_token_id=_A,eos_token_id=_A,tie_word_embeddings=_A,**_A )
class a__ ( snake_case_ ):
def __init__( self : int,_A : PretrainedConfig,_A : str = "default",_A : List[PatchingSpec] = None,_A : bool = False,):
"""simple docstring"""
super().__init__(_A,task=_A,patching_specs=_A,use_past=_A )
if not getattr(self._config,"pad_token_id",_A ):
# TODO: how to do that better?
SCREAMING_SNAKE_CASE_ : List[Any] = 0
@property
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(_A,direction="inputs" )
SCREAMING_SNAKE_CASE_ : Optional[int] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
SCREAMING_SNAKE_CASE_ : int = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
return self._config.n_layer
@property
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
return self._config.n_head
def __UpperCamelCase ( self : Optional[int],_A : PreTrainedTokenizer,_A : int = -1,_A : int = -1,_A : bool = False,_A : Optional[TensorType] = None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = super(_A,self ).generate_dummy_inputs(
_A,batch_size=_A,seq_length=_A,is_pair=_A,framework=_A )
        # We need to order the inputs in the way they appear in forward()
SCREAMING_SNAKE_CASE_ : Any = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE_ : Tuple = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE_ : Any = seqlen + 2
SCREAMING_SNAKE_CASE_ : Any = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
SCREAMING_SNAKE_CASE_ : Dict = [
(torch.zeros(_A ), torch.zeros(_A )) for _ in range(self.num_layers )
]
SCREAMING_SNAKE_CASE_ : Tuple = common_inputs['attention_mask']
if self.use_past:
SCREAMING_SNAKE_CASE_ : Tuple = ordered_inputs['attention_mask'].dtype
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(_A,_A,dtype=_A )],dim=1 )
return ordered_inputs
@property
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
return 13
| 18 |
from __future__ import annotations
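# The two functions below appear to implement the O(n log n) longest strictly
# increasing subsequence: a binary search over the running "tails" array, then a
# pass that either replaces a tail or extends the subsequence. For example,
# [2, 5, 3, 7, 11, 8, 10, 13, 6] yields a length of 6 (2, 3, 7, 8, 10, 13).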
def __lowercase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Dict , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[str] ): # noqa: E741
while r - l > 1:
UpperCamelCase_ : Union[str, Any] = (l + r) // 2
if v[m] >= key:
UpperCamelCase_ : str = m
else:
UpperCamelCase_ : List[Any] = m # noqa: E741
return r
def __lowercase ( lowerCamelCase : list[int] ):
if len(lowerCamelCase ) == 0:
return 0
UpperCamelCase_ : Tuple = [0] * len(lowerCamelCase )
UpperCamelCase_ : int = 1
UpperCamelCase_ : Dict = v[0]
for i in range(1 , len(lowerCamelCase ) ):
if v[i] < tail[0]:
UpperCamelCase_ : Any = v[i]
elif v[i] > tail[length - 1]:
UpperCamelCase_ : Dict = v[i]
length += 1
else:
UpperCamelCase_ : List[str] = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 175 | 0 |
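# The three classes below appear to form the BLIP-2 configuration family: a vision
# encoder config, a Q-Former config, and a composite config that nests both together
# with a text (language model) config resolved through CONFIG_MAPPING.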
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class lowerCamelCase ( lowerCAmelCase_ ):
snake_case_ = 'blip_2_vision_model'
def __init__( self, lowercase_=1408, lowercase_=6144, lowercase_=39, lowercase_=16, lowercase_=224, lowercase_=14, lowercase_="gelu", lowercase_=0.00_001, lowercase_=0.0, lowercase_=1E-10, lowercase_=True, **lowercase_, ) -> Dict:
super().__init__(**__lowerCAmelCase )
snake_case = hidden_size
snake_case = intermediate_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = patch_size
snake_case = image_size
snake_case = initializer_range
snake_case = attention_dropout
snake_case = layer_norm_eps
snake_case = hidden_act
snake_case = qkv_bias
@classmethod
def _lowerCamelCase ( cls, lowercase_, **lowercase_ ) -> Dict:
cls._set_token_in_kwargs(__lowerCAmelCase )
snake_case , snake_case = cls.get_config_dict(__lowerCAmelCase, **__lowerCAmelCase )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
snake_case = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls, 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__lowerCAmelCase, **__lowerCAmelCase )
class lowerCamelCase ( lowerCAmelCase_ ):
snake_case_ = 'blip_2_qformer'
def __init__( self, lowercase_=30522, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=0.02, lowercase_=1E-12, lowercase_=0, lowercase_="absolute", lowercase_=2, lowercase_=1408, **lowercase_, ) -> str:
super().__init__(pad_token_id=__lowerCAmelCase, **__lowerCAmelCase )
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = hidden_act
snake_case = intermediate_size
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = initializer_range
snake_case = layer_norm_eps
snake_case = position_embedding_type
snake_case = cross_attention_frequency
snake_case = encoder_hidden_size
@classmethod
def _lowerCamelCase ( cls, lowercase_, **lowercase_ ) -> List[Any]:
cls._set_token_in_kwargs(__lowerCAmelCase )
snake_case , snake_case = cls.get_config_dict(__lowerCAmelCase, **__lowerCAmelCase )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
snake_case = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls, 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__lowerCAmelCase, **__lowerCAmelCase )
class lowerCamelCase ( lowerCAmelCase_ ):
snake_case_ = 'blip-2'
snake_case_ = True
def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_=32, **lowercase_ ) -> Any:
super().__init__(**__lowerCAmelCase )
if vision_config is None:
snake_case = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
snake_case = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
snake_case = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
snake_case = BlipaVisionConfig(**__lowerCAmelCase )
snake_case = BlipaQFormerConfig(**__lowerCAmelCase )
snake_case = text_config['model_type'] if 'model_type' in text_config else 'opt'
snake_case = CONFIG_MAPPING[text_model_type](**__lowerCAmelCase )
snake_case = self.text_config.tie_word_embeddings
snake_case = self.text_config.is_encoder_decoder
snake_case = num_query_tokens
snake_case = self.vision_config.hidden_size
snake_case = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
snake_case = 1.0
snake_case = 0.02
@classmethod
def _lowerCamelCase ( cls, lowercase_, lowercase_, lowercase_, **lowercase_, ) -> Union[str, Any]:
return cls(
vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **__lowerCAmelCase, )
def _lowerCamelCase ( self ) -> Tuple:
snake_case = copy.deepcopy(self.__dict__ )
snake_case = self.vision_config.to_dict()
snake_case = self.qformer_config.to_dict()
snake_case = self.text_config.to_dict()
snake_case = self.__class__.model_type
return output
| 358 |
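# The pytest fixtures below appear to materialize a dummy dataset loading script on
# disk (tmp_path / "datasets" / "<name>" / "<name>.py") for the datasets test suite.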
'''simple docstring'''
import pytest
lowerCAmelCase_ = "__dummy_dataset1__"
lowerCAmelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def __magic_name__ ( ) -> List[Any]:
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def __magic_name__ ( ) -> Union[str, Any]:
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def __magic_name__ ( A , A , A ) -> Optional[int]:
snake_case = dataset_loading_script_name
snake_case = tmp_path / 'datasets' / script_name
script_dir.mkdir(parents=A )
snake_case = script_dir / F'''{script_name}.py'''
with open(A , 'w' ) as f:
f.write(A )
return str(A )
| 332 | 0 |
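# The function below computes n choose k via factorials. As a quick sanity check,
# combinations(5, 2) should be 5! / (2! * 3!) = 10.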
from math import factorial
def __lowercase ( _A , _A ) -> int:
    # If either of the conditions is true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError("""Please enter positive integers for n and k where n >= k""" )
return factorial(_A ) // (factorial(_A ) * factorial(n - k ))
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
F"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"""If a class of 40 students must be arranged into groups of""",
F"""4 for group projects, there are {combinations(40, 4)} ways""",
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
F"""are {combinations(10, 3)} ways that first, second and""",
"""third place can be awarded.""",
)
| 245 |
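# The utilities below appear to follow OpenFold-style chunking: compute a minimal set
# of slices over flattened batch dimensions, run a layer chunk by chunk, and scatter
# each chunk's output into a pre-allocated buffer to bound peak memory.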
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def __lowercase ( _A ) -> List[Tuple[int, ...]]:
SCREAMING_SNAKE_CASE : Optional[int] = []
if isinstance(_A , _A ):
for v in tree.values():
shapes.extend(_fetch_dims(_A ) )
elif isinstance(_A , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(_A ) )
elif isinstance(_A , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("""Not supported""" )
return shapes
@torch.jit.ignore
def __lowercase ( _A , _A ) -> Tuple[int, ...]:
SCREAMING_SNAKE_CASE : List[Any] = []
for d in reversed(_A ):
idx.append(flat_idx % d )
SCREAMING_SNAKE_CASE : Tuple = flat_idx // d
return tuple(reversed(_A ) )
@torch.jit.ignore
def __lowercase ( _A , _A , _A , _A = None , _A = None , ) -> List[Tuple[slice, ...]]:
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(_A ) -> None:
SCREAMING_SNAKE_CASE : int = True
for i in range(len(_A ) ):
SCREAMING_SNAKE_CASE : Dict = -1 * (i + 1)
l[reversed_idx] &= tally
SCREAMING_SNAKE_CASE : Any = l[reversed_idx]
if start_edges is None:
SCREAMING_SNAKE_CASE : Tuple = [s == 0 for s in start]
reduce_edge_list(_A )
if end_edges is None:
SCREAMING_SNAKE_CASE : Tuple = [e == (d - 1) for e, d in zip(_A , _A )]
reduce_edge_list(_A )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(_A ) == 0:
return [()]
elif len(_A ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
SCREAMING_SNAKE_CASE : List[Tuple[slice, ...]] = []
SCREAMING_SNAKE_CASE : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(_A , _A ):
if s == e:
path_list.append(slice(_A , s + 1 ) )
else:
break
SCREAMING_SNAKE_CASE : Tuple[slice, ...] = tuple(_A )
SCREAMING_SNAKE_CASE : List[str] = len(_A )
# start == end, and we're done
if divergence_idx == len(_A ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
SCREAMING_SNAKE_CASE : List[str] = start[divergence_idx]
return tuple(
path + (slice(_A , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
SCREAMING_SNAKE_CASE : Tuple = end[divergence_idx]
return tuple(
path + (slice(_A , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
SCREAMING_SNAKE_CASE : int = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def __lowercase ( _A , _A , _A , _A ) -> torch.Tensor:
SCREAMING_SNAKE_CASE : Tuple = t.shape[:no_batch_dims]
SCREAMING_SNAKE_CASE : Union[str, Any] = list(_flat_idx_to_idx(_A , _A ) )
# _get_minimal_slice_set is inclusive
SCREAMING_SNAKE_CASE : Any = list(_flat_idx_to_idx(flat_end - 1 , _A ) )
# Get an ordered list of slices to perform
SCREAMING_SNAKE_CASE : List[Any] = _get_minimal_slice_set(
_A , _A , _A , )
SCREAMING_SNAKE_CASE : List[Any] = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
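# The function below appears to be OpenFold's `chunk_layer`: it runs `layer` over the
# inputs in chunks of `chunk_size` along the flattened batch dimensions; with
# `low_mem=True` it slices un-broadcast inputs via _chunk_slice instead of
# materializing the full expansion up front.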
def __lowercase ( _A , _A , _A , _A , _A = False , _A = None , _A = False , ) -> Any:
if not (len(_A ) > 0):
raise ValueError("""Must provide at least one input""" )
SCREAMING_SNAKE_CASE : Tuple = [shape[:no_batch_dims] for shape in _fetch_dims(_A )]
SCREAMING_SNAKE_CASE : str = tuple([max(_A ) for s in zip(*_A )] )
def _prep_inputs(_A ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
SCREAMING_SNAKE_CASE : List[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
SCREAMING_SNAKE_CASE : Union[str, Any] = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
SCREAMING_SNAKE_CASE : Dict[str, Any] = tensor_tree_map(_prep_inputs , _A )
SCREAMING_SNAKE_CASE : Optional[int] = None
if _out is not None:
SCREAMING_SNAKE_CASE : Optional[int] = tensor_tree_map(lambda _A : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
SCREAMING_SNAKE_CASE : Optional[int] = 1
for d in orig_batch_dims:
flat_batch_dim *= d
SCREAMING_SNAKE_CASE : Tuple = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(_A ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = prepped_outputs
for _ in range(_A ):
# Chunk the input
if not low_mem:
SCREAMING_SNAKE_CASE : int = _select_chunk
else:
SCREAMING_SNAKE_CASE : Optional[int] = partial(
_chunk_slice , flat_start=_A , flat_end=min(_A , i + chunk_size ) , no_batch_dims=len(_A ) , )
SCREAMING_SNAKE_CASE : Dict[str, Any] = tensor_tree_map(_A , _A )
# Run the layer on the chunk
SCREAMING_SNAKE_CASE : Tuple = layer(**_A )
# Allocate space for the output
if out is None:
SCREAMING_SNAKE_CASE : List[str] = tensor_tree_map(lambda _A : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , _A )
# Put the chunk in its pre-allocated space
if isinstance(_A , _A ):
def assign(_A , _A ) -> None:
for k, v in da.items():
if isinstance(_A , _A ):
assign(_A , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
SCREAMING_SNAKE_CASE : Optional[Any] = da[k]
assign(_A , _A )
elif isinstance(_A , _A ):
            for xa, xb in zip(_A , _A ):
                if _add_into_out:
                    # two distinct names are needed here: accumulate the chunk (xb) into the output slice (xa)
                    xa[i : i + chunk_size] += xb
                else:
                    SCREAMING_SNAKE_CASE : str = xb
elif isinstance(_A , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
SCREAMING_SNAKE_CASE : List[Any] = output_chunk
else:
raise ValueError("""Not supported""" )
i += chunk_size
SCREAMING_SNAKE_CASE : Any = tensor_tree_map(lambda _A : t.view(orig_batch_dims + t.shape[1:] ) , _A )
return out
class a__ :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase__ : int = 5_1_2 , ) ->int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = max_chunk_size
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[tuple] = None
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Callable , UpperCAmelCase__ : tuple , UpperCAmelCase__ : int ) ->int:
"""simple docstring"""
logging.info("""Tuning chunk size...""" )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
SCREAMING_SNAKE_CASE : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
SCREAMING_SNAKE_CASE : Dict = [c for c in candidates if c > min_chunk_size]
SCREAMING_SNAKE_CASE : List[str] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(UpperCAmelCase__ : int ) -> bool:
try:
with torch.no_grad():
fn(*UpperCAmelCase__ , chunk_size=UpperCAmelCase__ )
return True
except RuntimeError:
return False
SCREAMING_SNAKE_CASE : List[str] = 0
SCREAMING_SNAKE_CASE : List[str] = len(UpperCAmelCase__ ) - 1
while i > min_viable_chunk_size_index:
SCREAMING_SNAKE_CASE : int = test_chunk_size(candidates[i] )
if not viable:
SCREAMING_SNAKE_CASE : Tuple = (min_viable_chunk_size_index + i) // 2
else:
SCREAMING_SNAKE_CASE : List[str] = i
SCREAMING_SNAKE_CASE : List[str] = (i + len(UpperCAmelCase__ ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Iterable , UpperCAmelCase__ : Iterable ) ->bool:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = True
        # iterate pairwise; two distinct loop names are needed for a meaningful comparison
        for aa, ab in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
            assert type(UpperCAmelCase__ ) == type(UpperCAmelCase__ )
            if isinstance(UpperCAmelCase__ , (list, tuple) ):
                consistent &= self._compare_arg_caches(UpperCAmelCase__ , UpperCAmelCase__ )
            elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
                SCREAMING_SNAKE_CASE : Optional[Any] = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase__ : x[0] )]
                SCREAMING_SNAKE_CASE : List[str] = [v for _, v in sorted(ab.items() , key=lambda UpperCAmelCase__ : x[0] )]
                consistent &= self._compare_arg_caches(UpperCAmelCase__ , UpperCAmelCase__ )
            else:
                consistent &= aa == ab
return consistent
def _lowercase ( self : List[str] , UpperCAmelCase__ : Callable , UpperCAmelCase__ : tuple , UpperCAmelCase__ : int , ) ->int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : tuple = tree_map(lambda UpperCAmelCase__ : a.shape if isinstance(UpperCAmelCase__ , torch.Tensor ) else a , UpperCAmelCase__ , UpperCAmelCase__ )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self._compare_arg_caches(self.cached_arg_data , UpperCAmelCase__ )
else:
            # Otherwise (no cached data yet), force a tuning pass
SCREAMING_SNAKE_CASE : List[Any] = False
if not consistent:
SCREAMING_SNAKE_CASE : List[Any] = self._determine_favorable_chunk_size(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
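

# --- Added illustrative sketch (not part of the original file; reuses this
# file's `math` import) ---
# The tuner above binary-searches power-of-two chunk sizes, keeping the largest
# candidate whose trial run does not raise a RuntimeError (typically an OOM).
# A self-contained toy version of that search, with a fake workload whose
# "memory limit" is 96 elements:
def toy_tune_chunk_size(max_chunk_size: int = 512, min_chunk_size: int = 8, limit: int = 96) -> int:
    def fn(chunk_size: int) -> None:
        if chunk_size > limit:  # stands in for an out-of-memory failure
            raise RuntimeError("OOM")

    candidates = [2**l for l in range(int(math.log(max_chunk_size, 2)) + 1)]
    candidates = [min_chunk_size] + [c for c in candidates if c > min_chunk_size]
    candidates[-1] += 4  # probe slightly past the largest power of two

    def viable(c: int) -> bool:
        try:
            fn(c)
            return True
        except RuntimeError:
            return False

    lo, i = 0, len(candidates) - 1
    while i > lo:
        if not viable(candidates[i]):
            i = (lo + i) // 2
        else:
            lo = i
            i = (i + len(candidates) - 1) // 2
    return candidates[lo]  # toy_tune_chunk_size() == 64 with the defaults above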
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class a ( UpperCAmelCase , UpperCAmelCase ):
_lowercase = 1
@register_to_config
def __init__( self , A_=2000 , A_=0.1 , A_=20 , A_=1e-3 ):
'''simple docstring'''
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Any = None
_UpperCAmelCase : List[str] = None
def _UpperCAmelCase ( self , A_ , A_ = None ):
'''simple docstring'''
_UpperCAmelCase : Dict = torch.linspace(1 , self.config.sampling_eps , A_ , device=A_ )
def _UpperCAmelCase ( self , A_ , A_ , A_ , A_=None ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
_UpperCAmelCase : Tuple = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
_UpperCAmelCase : Any = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
_UpperCAmelCase : Optional[int] = std.flatten()
while len(std.shape ) < len(score.shape ):
_UpperCAmelCase : Tuple = std.unsqueeze(-1 )
_UpperCAmelCase : str = -score / std
# compute
_UpperCAmelCase : Optional[int] = -1.0 / len(self.timesteps )
_UpperCAmelCase : Dict = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
_UpperCAmelCase : Any = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
_UpperCAmelCase : Dict = beta_t.unsqueeze(-1 )
_UpperCAmelCase : Tuple = -0.5 * beta_t * x
_UpperCAmelCase : int = torch.sqrt(A_ )
_UpperCAmelCase : int = drift - diffusion**2 * score
_UpperCAmelCase : Dict = x + drift * dt
# add noise
_UpperCAmelCase : Dict = randn_tensor(x.shape , layout=x.layout , generator=A_ , device=x.device , dtype=x.dtype )
_UpperCAmelCase : Optional[int] = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
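

# --- Added illustrative usage (sketch) ---
# The obfuscated methods above correspond to the public diffusers API
# set_timesteps / step_pred (this module is exported as ScoreSdeVpScheduler).
# A toy reverse-SDE sampling loop, with a dummy score standing in for a
# trained score network:
#
#   scheduler = ScoreSdeVpScheduler(num_train_timesteps=2000)
#   scheduler.set_timesteps(num_inference_steps=100)
#   x = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       score = -x  # hypothetical stand-in for score_model(x, t)
#       x, x_mean = scheduler.step_pred(score, t, x)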
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
SCREAMING_SNAKE_CASE_ = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
SCREAMING_SNAKE_CASE_ = direct_transformers_import(PATH_TO_TRANSFORMERS)
SCREAMING_SNAKE_CASE_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
SCREAMING_SNAKE_CASE_ = re.compile(R'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
SCREAMING_SNAKE_CASE_ = {
'DecisionTransformerConfig',
'EncoderDecoderConfig',
'MusicgenConfig',
'RagConfig',
'SpeechEncoderDecoderConfig',
'TimmBackboneConfig',
'VisionEncoderDecoderConfig',
'VisionTextDualEncoderConfig',
'LlamaConfig',
}
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Dict ) -> Union[str, Any]:
_UpperCAmelCase : List[Any] = None
# source code of `config_class`
_UpperCAmelCase : int = inspect.getsource(lowerCAmelCase )
_UpperCAmelCase : Tuple = _re_checkpoint.findall(lowerCAmelCase )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/" ):
_UpperCAmelCase : Dict = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
_UpperCAmelCase : Any = F'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
_UpperCAmelCase : Union[str, Any] = ckpt_name
break
return checkpoint
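
# Added sanity-check example: the checkpoint regex above captures the name and
# hub URL from a markdown-style link in a config docstring, e.g.
#   >>> pattern = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
#   >>> pattern.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]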
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
_UpperCAmelCase : Any = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
_UpperCAmelCase : int = get_checkpoint_from_config_class(lowerCAmelCase )
_UpperCAmelCase : Tuple = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
_UpperCAmelCase : int = "\n".join(sorted(lowerCAmelCase ) )
raise ValueError(F'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class lowerCamelCase :
'''simple docstring'''
def __init__( self: Tuple ) -> Optional[Any]:
snake_case_ :Optional[int] = {}
def lowerCAmelCase_ ( self: Dict , snake_case: str ) -> None:
snake_case_ :str = {}
def lowerCAmelCase_ ( self: Optional[int] , snake_case: str , snake_case: str , snake_case: float ) -> None:
if nodea not in self.connections:
self.add_node(snake_case )
if nodea not in self.connections:
self.add_node(snake_case )
snake_case_ :Dict = probability
def lowerCAmelCase_ ( self: List[Any] ) -> list[str]:
return list(self.connections )
def lowerCAmelCase_ ( self: Any , snake_case: str ) -> str:
snake_case_ :Optional[Any] = 0
snake_case_ :List[str] = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def A_ ( _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :List[str] = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(_lowercase, _lowercase, _lowercase )
snake_case_ :int = Counter(graph.get_nodes() )
snake_case_ :Optional[Any] = start
for _ in range(_lowercase ):
snake_case_ :Tuple = graph.transition(_lowercase )
visited[node] += 1
return visited
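
# Added illustrative usage (sketch; the obfuscated names above correspond to
# MarkovChainGraphUndirectedUnweighted and get_transitions in the original source):
#
#   transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
#   visited = get_transitions("a", transitions, 1000)  # Counter of visits per node
#
# In the long run "a" is visited about five times as often as "b", since the
# stationary distribution of this chain is {"a": 5/6, "b": 1/6}.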
if __name__ == "__main__":
import doctest
doctest.testmod()
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
UpperCAmelCase = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
UpperCAmelCase = {
"""jukebox""": 512,
}
class lowerCAmelCase_ ( lowerCamelCase__ ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_LYRIC_TOKENS_SIZES
__snake_case = ["input_ids", "attention_mask"]
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=["v3", "v2", "v2"] , _UpperCAmelCase=5_12 , _UpperCAmelCase=5 , _UpperCAmelCase="<|endoftext|>" , **_UpperCAmelCase , ):
snake_case_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
super().__init__(
unk_token=_UpperCAmelCase , n_genres=_UpperCAmelCase , version=_UpperCAmelCase , max_n_lyric_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
snake_case_ = version
snake_case_ = max_n_lyric_tokens
snake_case_ = n_genres
with open(_UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle:
snake_case_ = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle:
snake_case_ = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle:
snake_case_ = json.load(_UpperCAmelCase )
snake_case_ = R'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'''
        # In v2 the character vocab had n_vocab=80; in v3 the '+' character was missed, so n_vocab=79.
if len(self.lyrics_encoder ) == 79:
snake_case_ = oov.replace(R'''\-\'''' , R'''\-+\'''' )
snake_case_ = regex.compile(_UpperCAmelCase )
snake_case_ = {v: k for k, v in self.artists_encoder.items()}
snake_case_ = {v: k for k, v in self.genres_encoder.items()}
snake_case_ = {v: k for k, v in self.lyrics_encoder.items()}
@property
def UpperCamelCase__ ( self ):
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def UpperCamelCase__ ( self ):
        return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}  # dict() accepts at most one positional mapping
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = [self.artists_encoder.get(_UpperCAmelCase , 0 ) for artist in list_artists]
for genres in range(len(_UpperCAmelCase ) ):
snake_case_ = [self.genres_encoder.get(_UpperCAmelCase , 0 ) for genre in list_genres[genres]]
snake_case_ = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
snake_case_ = [[self.lyrics_encoder.get(_UpperCAmelCase , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def UpperCamelCase__ ( self , _UpperCAmelCase ):
return list(_UpperCAmelCase )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
snake_case_ , snake_case_ , snake_case_ = self.prepare_for_tokenization(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
snake_case_ = self._tokenize(_UpperCAmelCase )
return artist, genre, lyrics
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False ):
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
snake_case_ = artists[idx].lower()
snake_case_ = [genres[idx].lower()]
else:
snake_case_ = self._normalize(artists[idx] ) + '''.v2'''
snake_case_ = [
self._normalize(_UpperCAmelCase ) + '''.v2''' for genre in genres[idx].split('''_''' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
snake_case_ = regex.compile(R'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' )
snake_case_ = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'''
snake_case_ = {vocab[index]: index + 1 for index in range(len(_UpperCAmelCase ) )}
snake_case_ = 0
snake_case_ = len(_UpperCAmelCase ) + 1
snake_case_ = self.vocab
snake_case_ = {v: k for k, v in self.vocab.items()}
snake_case_ = ''''''
else:
snake_case_ = regex.compile(R'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' )
snake_case_ = self._run_strip_accents(_UpperCAmelCase )
snake_case_ = lyrics.replace('''\\''' , '''\n''' )
snake_case_ = self.out_of_vocab.sub('''''' , _UpperCAmelCase ), [], []
return artists, genres, lyrics
def UpperCamelCase__ ( self , _UpperCAmelCase ):
snake_case_ = unicodedata.normalize('''NFD''' , _UpperCAmelCase )
snake_case_ = []
for char in text:
            snake_case_ = unicodedata.category(char )
            if cat == "Mn":
                continue
            output.append(char )
return "".join(_UpperCAmelCase )
def UpperCamelCase__ ( self , _UpperCAmelCase ):
snake_case_ = (
            [chr(i ) for i in range(ord('''a''' ) , ord('''z''' ) + 1 )]
            + [chr(i ) for i in range(ord('''A''' ) , ord('''Z''' ) + 1 )]
            + [chr(i ) for i in range(ord('''0''' ) , ord('''9''' ) + 1 )]
+ ['''.''']
)
snake_case_ = frozenset(_UpperCAmelCase )
snake_case_ = re.compile(R'''_+''' )
snake_case_ = ''''''.join([c if c in accepted else '''_''' for c in text.lower()] )
snake_case_ = pattern.sub('''_''' , _UpperCAmelCase ).strip('''_''' )
return text
def UpperCamelCase__ ( self , _UpperCAmelCase ):
return " ".join(_UpperCAmelCase )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
# Convert to TensorType
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = TensorType(_UpperCAmelCase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' )
import tensorflow as tf
snake_case_ = tf.constant
snake_case_ = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' )
import torch
snake_case_ = torch.tensor
snake_case_ = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' )
import jax.numpy as jnp # noqa: F811
snake_case_ = jnp.array
snake_case_ = _is_jax
else:
snake_case_ = np.asarray
snake_case_ = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
snake_case_ = [inputs]
if not is_tensor(_UpperCAmelCase ):
snake_case_ = as_tensor(_UpperCAmelCase )
except: # noqa E722
raise ValueError(
'''Unable to create tensor, you should probably activate truncation and/or padding '''
'''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' )
return inputs
def __call__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="" , _UpperCAmelCase="pt" ):
snake_case_ = [0, 0, 0]
snake_case_ = [artist] * len(self.version )
snake_case_ = [genres] * len(self.version )
snake_case_ , snake_case_ , snake_case_ = self.tokenize(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
snake_case_ , snake_case_ , snake_case_ = self._convert_token_to_id(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
snake_case_ = [-INFINITY] * len(full_tokens[-1] )
snake_case_ = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=_UpperCAmelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ = os.path.join(
_UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] )
with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=_UpperCAmelCase ) )
snake_case_ = os.path.join(
_UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] )
with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=_UpperCAmelCase ) )
snake_case_ = os.path.join(
_UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] )
with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=_UpperCAmelCase ) )
return (artists_file, genres_file, lyrics_file)
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = self.artists_decoder.get(_UpperCAmelCase )
snake_case_ = [self.genres_decoder.get(_UpperCAmelCase ) for genre in genres_index]
snake_case_ = [self.lyrics_decoder.get(_UpperCAmelCase ) for character in lyric_index]
        return artist, genres, lyrics
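

# --- Added illustrative usage (sketch) ---
# The class above is exported as JukeboxTokenizer; given an artist, a genre
# string and lyrics, it returns one (input_ids, attention_masks) pair per prior
# level in `version`. The checkpoint name below is the public one on the Hub:
#
#   from transformers import JukeboxTokenizer
#   tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
#   inputs = tokenizer("Alan Jackson", "Country Rock", "old town road")
#   inputs["input_ids"]  # list of tensors, one per prior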
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
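

# --- Added standalone sketch of the lazy-import idea used above ---
# Names are registered up front; the owning module is only imported on first
# attribute access. (The real transformers._LazyModule is more elaborate.)
import importlib


class TinyLazyModule:
    def __init__(self, name_to_module):
        self._map = name_to_module  # e.g. {"sqrt": "math"}

    def __getattr__(self, name):
        module = importlib.import_module(self._map[name])  # deferred import
        return getattr(module, name)


# lazy = TinyLazyModule({"sqrt": "math"})
# lazy.sqrt(9)  # "math" is imported only at this point -> 3.0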
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class a_ ( SCREAMING_SNAKE_CASE__ ):
__A = 42
__A = jnp.floataa
__A = True
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
super().setup()
lowercase_ :List[Any] = nn.Dense(5 , dtype=self.dtype )
def __call__( self : Tuple , *lowercase : Tuple , **lowercase : Optional[int] ):
"""simple docstring"""
lowercase_ :Tuple = super().__call__(*__lowerCamelCase , **__lowerCamelCase )
lowercase_ :Optional[int] = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class a_ ( SCREAMING_SNAKE_CASE__ ):
__A = FlaxBigBirdForNaturalQuestionsModule
def UpperCAmelCase_ ( __lowerCamelCase : Tuple ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : int ,__lowerCamelCase : Tuple ,__lowerCamelCase : Any ,__lowerCamelCase : Any ):
def cross_entropy(__lowerCamelCase : str ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : Tuple=None ):
lowercase_ :Dict = logits.shape[-1]
lowercase_ :Dict = (labels[..., None] == jnp.arange(snake_case_ )[None]).astype("f4" )
lowercase_ :int = jax.nn.log_softmax(snake_case_ ,axis=-1 )
lowercase_ :Optional[int] = -jnp.sum(labels * logits ,axis=-1 )
if reduction is not None:
lowercase_ :Optional[int] = reduction(snake_case_ )
return loss
lowercase_ :str = partial(snake_case_ ,reduction=jnp.mean )
lowercase_ :Dict = cross_entropy(snake_case_ ,snake_case_ )
lowercase_ :List[str] = cross_entropy(snake_case_ ,snake_case_ )
lowercase_ :str = cross_entropy(snake_case_ ,snake_case_ )
return (start_loss + end_loss + pooled_loss) / 3
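# Added worked example: the nested cross_entropy above one-hot encodes integer
# labels and sums the negative log-softmax at the label index. For one 3-class
# row (reuses the jax / jnp imports at the top of this file; never called):
def _cross_entropy_demo():
    logits = jnp.array([[2.0, 1.0, 0.1]])
    labels = jnp.array([0])
    onehot = (labels[..., None] == jnp.arange(3)[None]).astype("f4")
    return -jnp.sum(onehot * jax.nn.log_softmax(logits, axis=-1), axis=-1)  # ≈ [0.417]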
@dataclass
class a_ :
__A = "google/bigbird-roberta-base"
__A = 3_000
__A = 10_500
__A = 128
__A = 3
__A = 1
__A = 5
# tx_args
__A = 3e-5
__A = 0.0
__A = 20_000
__A = 0.0095
__A = "bigbird-roberta-natural-questions"
__A = "training-expt"
__A = "data/nq-training.jsonl"
__A = "data/nq-validation.jsonl"
def lowercase__ ( self : Tuple ):
"""simple docstring"""
os.makedirs(self.base_dir , exist_ok=__lowerCamelCase )
lowercase_ :Dict = os.path.join(self.base_dir , self.save_dir )
lowercase_ :Dict = self.batch_size_per_device * jax.device_count()
@dataclass
class a_ :
__A = 42
__A = 4_096 # no dynamic padding on TPUs
def __call__( self : Optional[int] , lowercase : Optional[Any] ):
"""simple docstring"""
lowercase_ :Optional[int] = self.collate_fn(__lowerCamelCase )
lowercase_ :Tuple = jax.tree_util.tree_map(__lowerCamelCase , __lowerCamelCase )
return batch
def lowercase__ ( self : List[Any] , lowercase : Union[str, Any] ):
"""simple docstring"""
lowercase_ :List[Any] = self.fetch_inputs(features["input_ids"] )
lowercase_ :Union[str, Any] = {
'''input_ids''': jnp.array(__lowerCamelCase , dtype=jnp.intaa ),
'''attention_mask''': jnp.array(__lowerCamelCase , dtype=jnp.intaa ),
'''start_labels''': jnp.array(features["start_token"] , dtype=jnp.intaa ),
'''end_labels''': jnp.array(features["end_token"] , dtype=jnp.intaa ),
'''pooled_labels''': jnp.array(features["category"] , dtype=jnp.intaa ),
}
return batch
def lowercase__ ( self : List[Any] , lowercase : Union[str, Any] ):
"""simple docstring"""
        lowercase_ :Any = [self._fetch_inputs(ids ) for ids in input_ids]
return zip(*__lowerCamelCase )
def lowercase__ ( self : Tuple , lowercase : int ):
"""simple docstring"""
lowercase_ :Any = [1 for _ in range(len(__lowerCamelCase ) )]
while len(__lowerCamelCase ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def UpperCAmelCase_ ( __lowerCamelCase : List[Any] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : List[str]=None ):
if seed is not None:
lowercase_ :List[Any] = dataset.shuffle(seed=snake_case_ )
for i in range(len(snake_case_ ) // batch_size ):
lowercase_ :Tuple = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(snake_case_ )
@partial(jax.pmap ,axis_name="batch" )
def UpperCAmelCase_ ( __lowerCamelCase : str ,__lowerCamelCase : Union[str, Any] ,**__lowerCamelCase : List[str] ):
def loss_fn(__lowerCamelCase : List[str] ):
lowercase_ :str = model_inputs.pop("start_labels" )
lowercase_ :str = model_inputs.pop("end_labels" )
lowercase_ :int = model_inputs.pop("pooled_labels" )
lowercase_ :Dict = state.apply_fn(**snake_case_ ,params=snake_case_ ,dropout_rng=snake_case_ ,train=snake_case_ )
lowercase_ :Union[str, Any] = outputs
return state.loss_fn(
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,)
lowercase_ :int = jax.random.split(snake_case_ )
lowercase_ :str = jax.value_and_grad(snake_case_ )
lowercase_ :Optional[int] = grad_fn(state.params )
lowercase_ :List[str] = jax.lax.pmean({"loss": loss} ,axis_name="batch" )
lowercase_ :List[str] = jax.lax.pmean(snake_case_ ,"batch" )
lowercase_ :str = state.apply_gradients(grads=snake_case_ )
return state, metrics, new_drp_rng
@partial(jax.pmap ,axis_name="batch" )
def UpperCAmelCase_ ( __lowerCamelCase : int ,**__lowerCamelCase : Union[str, Any] ):
lowercase_ :Tuple = model_inputs.pop("start_labels" )
lowercase_ :Dict = model_inputs.pop("end_labels" )
lowercase_ :int = model_inputs.pop("pooled_labels" )
lowercase_ :List[str] = state.apply_fn(**snake_case_ ,params=state.params ,train=snake_case_ )
lowercase_ :Dict = outputs
lowercase_ :Optional[int] = state.loss_fn(snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ )
lowercase_ :List[str] = jax.lax.pmean({"loss": loss} ,axis_name="batch" )
return metrics
class a_ ( train_state.TrainState ):
__A = struct.field(pytree_node=SCREAMING_SNAKE_CASE__ )
@dataclass
class a_ :
__A = 42
__A = 42
__A = 42
__A = 42
__A = 42
__A = 42
__A = None
def lowercase__ ( self : Optional[Any] , lowercase : Optional[Any] , lowercase : Any , lowercase : Optional[int] , lowercase : Any=None ):
"""simple docstring"""
lowercase_ :Tuple = model.params
lowercase_ :Union[str, Any] = TrainState.create(
apply_fn=model.__call__ , params=__lowerCamelCase , tx=__lowerCamelCase , loss_fn=__lowerCamelCase , )
if ckpt_dir is not None:
lowercase_ :Optional[Any] = restore_checkpoint(__lowerCamelCase , __lowerCamelCase )
lowercase_ :List[Any] = {
'''lr''': args.lr,
'''init_lr''': args.init_lr,
'''warmup_steps''': args.warmup_steps,
'''num_train_steps''': num_train_steps,
'''weight_decay''': args.weight_decay,
}
lowercase_ :List[str] = build_tx(**__lowerCamelCase )
lowercase_ :int = train_state.TrainState(
step=__lowerCamelCase , apply_fn=model.__call__ , params=__lowerCamelCase , tx=__lowerCamelCase , opt_state=__lowerCamelCase , )
lowercase_ :int = args
lowercase_ :Optional[Any] = data_collator
lowercase_ :Tuple = lr
lowercase_ :List[Any] = params
lowercase_ :Dict = jax_utils.replicate(__lowerCamelCase )
return state
def lowercase__ ( self : Any , lowercase : int , lowercase : Optional[Any] , lowercase : Any ):
"""simple docstring"""
lowercase_ :List[Any] = self.args
lowercase_ :Dict = len(__lowerCamelCase ) // args.batch_size
lowercase_ :List[Any] = jax.random.PRNGKey(0 )
lowercase_ :Optional[Any] = jax.random.split(__lowerCamelCase , jax.device_count() )
for epoch in range(args.max_epochs ):
lowercase_ :Tuple = jnp.array(0 , dtype=jnp.floataa )
lowercase_ :Optional[Any] = get_batched_dataset(__lowerCamelCase , args.batch_size , seed=__lowerCamelCase )
lowercase_ :Union[str, Any] = 0
for batch in tqdm(__lowerCamelCase , total=__lowerCamelCase , desc=F'Running EPOCH-{epoch}' ):
lowercase_ :Optional[Any] = self.data_collator(__lowerCamelCase )
lowercase_ :Union[str, Any] = self.train_step_fn(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
if i % args.logging_steps == 0:
lowercase_ :Union[str, Any] = jax_utils.unreplicate(state.step )
lowercase_ :Optional[int] = running_loss.item() / i
lowercase_ :List[Any] = self.scheduler_fn(state_step - 1 )
lowercase_ :Union[str, Any] = self.evaluate(__lowerCamelCase , __lowerCamelCase )
lowercase_ :Optional[Any] = {
'''step''': state_step.item(),
'''eval_loss''': eval_loss.item(),
'''tr_loss''': tr_loss,
'''lr''': lr.item(),
}
tqdm.write(str(__lowerCamelCase ) )
self.logger.log(__lowerCamelCase , commit=__lowerCamelCase )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F'-e{epoch}-s{i}' , state=__lowerCamelCase )
def lowercase__ ( self : str , lowercase : Union[str, Any] , lowercase : Dict ):
"""simple docstring"""
lowercase_ :Union[str, Any] = get_batched_dataset(__lowerCamelCase , self.args.batch_size )
lowercase_ :int = len(__lowerCamelCase ) // self.args.batch_size
lowercase_ :Optional[Any] = jnp.array(0 , dtype=jnp.floataa )
lowercase_ :Dict = 0
for batch in tqdm(__lowerCamelCase , total=__lowerCamelCase , desc="Evaluating ... " ):
lowercase_ :List[str] = self.data_collator(__lowerCamelCase )
lowercase_ :Union[str, Any] = self.val_step_fn(__lowerCamelCase , **__lowerCamelCase )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
return running_loss / i
def lowercase__ ( self : Any , lowercase : Dict , lowercase : Optional[Any] ):
"""simple docstring"""
lowercase_ :Dict = jax_utils.unreplicate(__lowerCamelCase )
print(F'SAVING CHECKPOINT IN {save_dir}' , end=" ... " )
self.model_save_fn(__lowerCamelCase , params=state.params )
with open(os.path.join(__lowerCamelCase , "opt_state.msgpack" ) , "wb" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(__lowerCamelCase , "args.joblib" ) )
joblib.dump(self.data_collator , os.path.join(__lowerCamelCase , "data_collator.joblib" ) )
with open(os.path.join(__lowerCamelCase , "training_state.json" ) , "w" ) as f:
json.dump({"step": state.step.item()} , __lowerCamelCase )
print("DONE" )
def UpperCAmelCase_ ( __lowerCamelCase : int ,__lowerCamelCase : Dict ):
print(F'RESTORING CHECKPOINT FROM {save_dir}' ,end=" ... " )
with open(os.path.join(snake_case_ ,"flax_model.msgpack" ) ,"rb" ) as f:
lowercase_ :List[Any] = from_bytes(state.params ,f.read() )
with open(os.path.join(snake_case_ ,"opt_state.msgpack" ) ,"rb" ) as f:
lowercase_ :Optional[int] = from_bytes(state.opt_state ,f.read() )
lowercase_ :Tuple = joblib.load(os.path.join(snake_case_ ,"args.joblib" ) )
lowercase_ :List[str] = joblib.load(os.path.join(snake_case_ ,"data_collator.joblib" ) )
with open(os.path.join(snake_case_ ,"training_state.json" ) ,"r" ) as f:
lowercase_ :Dict = json.load(snake_case_ )
lowercase_ :int = training_state['''step''']
print("DONE" )
return params, opt_state, step, args, data_collator
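# Added minimal sketch: the checkpoint helpers above round-trip pytrees through
# flax's msgpack serialization (to_bytes / from_bytes are imported at the top).
# from_bytes needs a template object with the same structure (never called):
def _serialization_demo():
    params = {"dense": {"kernel": jnp.ones((2, 2))}}
    raw = to_bytes(params)          # bytes, safe to write to disk
    return from_bytes(params, raw)  # restored pytree, equal to `params`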
def UpperCAmelCase_ ( __lowerCamelCase : List[str] ,__lowerCamelCase : Any ,__lowerCamelCase : Dict ,__lowerCamelCase : str ):
lowercase_ :str = num_train_steps - warmup_steps
lowercase_ :Union[str, Any] = optax.linear_schedule(init_value=snake_case_ ,end_value=snake_case_ ,transition_steps=snake_case_ )
lowercase_ :Optional[Any] = optax.linear_schedule(init_value=snake_case_ ,end_value=1e-7 ,transition_steps=snake_case_ )
lowercase_ :Any = optax.join_schedules(schedules=[warmup_fn, decay_fn] ,boundaries=[warmup_steps] )
return lr
def UpperCAmelCase_ ( __lowerCamelCase : List[str] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Union[str, Any] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : str ):
def weight_decay_mask(__lowerCamelCase : List[Any] ):
lowercase_ :List[Any] = traverse_util.flatten_dict(snake_case_ )
lowercase_ :int = {k: (v[-1] != '''bias''' and v[-2:] != ('''LayerNorm''', '''scale''')) for k, v in params.items()}
return traverse_util.unflatten_dict(snake_case_ )
lowercase_ :List[Any] = scheduler_fn(snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ )
lowercase_ :List[str] = optax.adamw(learning_rate=snake_case_ ,weight_decay=snake_case_ ,mask=snake_case_ )
return tx, lr
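# Added worked example: the schedule built above warms up linearly from init_lr
# to lr over `warmup_steps`, then decays linearly toward ~0 for the rest:
def _schedule_demo():
    warmup = optax.linear_schedule(init_value=0.0, end_value=3e-5, transition_steps=100)
    decay = optax.linear_schedule(init_value=3e-5, end_value=1e-7, transition_steps=900)
    lr = optax.join_schedules(schedules=[warmup, decay], boundaries=[100])
    return lr(0), lr(100), lr(1000)  # 0.0 -> peak 3e-5 -> ~1e-7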
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
a_ = NewType("""DataClass""", Any)
a_ = NewType("""DataClassType""", Any)
def __lowercase ( snake_case_ : List[str] ) ->List[str]:
'''simple docstring'''
if isinstance(snake_case_ ,snake_case_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def __lowercase ( snake_case_ : list ) ->Callable[[str], Any]:
'''simple docstring'''
    __A : List[Any] = {str(choice ): choice for choice in choices}
return lambda snake_case_ : str_to_choice.get(snake_case_ ,snake_case_ )
def __lowercase ( *,
snake_case_ : Union[str, List[str]] = None ,snake_case_ : str = None ,snake_case_ : Any = dataclasses.MISSING ,snake_case_ : Callable[[], Any] = dataclasses.MISSING ,snake_case_ : dict = None ,**snake_case_ : str ,) ->dataclasses.Field:
'''simple docstring'''
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
__A : Optional[Any] = {}
if aliases is not None:
__A : List[Any] = aliases
if help is not None:
__A : str = help
return dataclasses.field(metadata=snake_case_ ,default=snake_case_ ,default_factory=snake_case_ ,**snake_case_ )
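# Added illustrative usage (sketch; class and method names below are obfuscated,
# so this assumes the public transformers.HfArgumentParser interface they
# correspond to). A dataclass whose fields become CLI arguments:
#
#   from dataclasses import dataclass, field
#   from transformers import HfArgumentParser
#
#   @dataclass
#   class TrainArgs:
#       lr: float = field(default=3e-5, metadata={"help": "learning rate"})
#       do_eval: bool = False  # `--do_eval` alone flips this to True (see the bool handling below)
#
#   parser = HfArgumentParser(TrainArgs)
#   (args,) = parser.parse_args_into_dataclasses(["--lr", "1e-4", "--do_eval"])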
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 42
def __init__( self , __lowerCamelCase , **__lowerCamelCase ):
'''simple docstring'''
if "formatter_class" not in kwargs:
__A : str = ArgumentDefaultsHelpFormatter
super().__init__(**__lowerCamelCase )
if dataclasses.is_dataclass(__lowerCamelCase ):
__A : Union[str, Any] = [dataclass_types]
__A : Optional[Any] = list(__lowerCamelCase )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(__lowerCamelCase )
@staticmethod
def UpperCamelCase__( __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
__A : Optional[Any] = F"""--{field.name}"""
__A : List[Any] = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , __lowerCamelCase ):
raise RuntimeError(
                '''Unresolved type detected, which should have been resolved with the help of '''
                '''the `typing.get_type_hints` method by default''' )
__A : Tuple = kwargs.pop('''aliases''' , [] )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__A : Optional[int] = [aliases]
__A : str = getattr(field.type , '''__origin__''' , field.type )
if origin_type is Union or (hasattr(__lowerCamelCase , '''UnionType''' ) and isinstance(__lowerCamelCase , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(__lowerCamelCase ) not in field.type.__args__
):
raise ValueError(
'''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
''' the argument parser only supports one type per argument.'''
F""" Problem encountered in field '{field.name}'.""" )
if type(__lowerCamelCase ) not in field.type.__args__:
# filter `str` in Union
__A : int = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
__A : int = getattr(field.type , '''__origin__''' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
__A : int = (
field.type.__args__[0] if isinstance(__lowerCamelCase , field.type.__args__[1] ) else field.type.__args__[1]
)
__A : Tuple = getattr(field.type , '''__origin__''' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
__A : Union[str, Any] = {}
if origin_type is Literal or (isinstance(field.type , __lowerCamelCase ) and issubclass(field.type , __lowerCamelCase )):
if origin_type is Literal:
__A : Union[str, Any] = field.type.__args__
else:
__A : Union[str, Any] = [x.value for x in field.type]
__A : Optional[int] = make_choice_type_function(kwargs['''choices'''] )
if field.default is not dataclasses.MISSING:
__A : Dict = field.default
else:
__A : Optional[Any] = True
elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
__A : Any = copy(__lowerCamelCase )
# Hack because type=bool in argparse does not behave as we want.
__A : Dict = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
__A : Optional[Any] = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
__A : Tuple = default
# This tells argparse we accept 0 or 1 value after --field_name
__A : str = '''?'''
# This is the value that will get picked if we do --field_name (without value)
__A : int = True
elif isclass(__lowerCamelCase ) and issubclass(__lowerCamelCase , __lowerCamelCase ):
__A : str = field.type.__args__[0]
__A : List[str] = '''+'''
if field.default_factory is not dataclasses.MISSING:
__A : Optional[int] = field.default_factory()
elif field.default is dataclasses.MISSING:
__A : Tuple = True
else:
__A : Union[str, Any] = field.type
if field.default is not dataclasses.MISSING:
__A : Dict = field.default
elif field.default_factory is not dataclasses.MISSING:
__A : List[str] = field.default_factory()
else:
__A : str = True
parser.add_argument(__lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
__A : List[str] = False
parser.add_argument(F"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **__lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
if hasattr(__lowerCamelCase , '''_argument_group_name''' ):
__A : Tuple = self.add_argument_group(dtype._argument_group_name )
else:
__A : List[Any] = self
try:
__A : Dict[str, type] = get_type_hints(__lowerCamelCase )
except NameError:
raise RuntimeError(
F"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
'''removing line of `from __future__ import annotations` which opts in Postponed '''
'''Evaluation of Annotations (PEP 563)''' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__lowerCamelCase ):
__A : List[str] = '''.'''.join(map(__lowerCamelCase , sys.version_info[:3] ) )
raise RuntimeError(
F"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
'''line of `from __future__ import annotations` which opts in union types as '''
'''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
                    '''support Python versions lower than 3.10, you need to use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
for field in dataclasses.fields(__lowerCamelCase ):
if not field.init:
continue
__A : int = type_hints[field.name]
self._parse_dataclass_field(__lowerCamelCase , __lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase=None , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=None , ):
'''simple docstring'''
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
__A : Tuple = []
if args_filename:
args_files.append(Path(__lowerCamelCase ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
__A : Dict = ArgumentParser()
args_file_parser.add_argument(__lowerCamelCase , type=__lowerCamelCase , action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
__A , __A : List[Any] = args_file_parser.parse_known_args(args=__lowerCamelCase )
__A : Dict = vars(__lowerCamelCase ).get(args_file_flag.lstrip('''-''' ) , __lowerCamelCase )
if cmd_args_file_paths:
args_files.extend([Path(__lowerCamelCase ) for p in cmd_args_file_paths] )
__A : Any = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
__A : List[Any] = file_args + args if args is not None else file_args + sys.argv[1:]
__A , __A : Tuple = self.parse_known_args(args=__lowerCamelCase )
__A : int = []
for dtype in self.dataclass_types:
__A : List[str] = {f.name for f in dataclasses.fields(__lowerCamelCase ) if f.init}
__A : List[str] = {k: v for k, v in vars(__lowerCamelCase ).items() if k in keys}
for k in keys:
                delattr(__lowerCamelCase , k )
__A : int = dtype(**__lowerCamelCase )
outputs.append(__lowerCamelCase )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(__lowerCamelCase )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = False ):
'''simple docstring'''
__A : Tuple = set(args.keys() )
__A : Union[str, Any] = []
for dtype in self.dataclass_types:
__A : str = {f.name for f in dataclasses.fields(__lowerCamelCase ) if f.init}
__A : Optional[int] = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
__A : int = dtype(**__lowerCamelCase )
outputs.append(__lowerCamelCase )
if not allow_extra_keys and unused_keys:
raise ValueError(F"""Some keys are not used by the HfArgumentParser: {sorted(__lowerCamelCase )}""" )
return tuple(__lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = False ):
'''simple docstring'''
with open(Path(__lowerCamelCase ) , encoding='''utf-8''' ) as open_json_file:
__A : List[str] = json.loads(open_json_file.read() )
__A : List[str] = self.parse_dict(__lowerCamelCase , allow_extra_keys=__lowerCamelCase )
return tuple(__lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = False ):
'''simple docstring'''
__A : Dict = self.parse_dict(yaml.safe_load(Path(__lowerCamelCase ).read_text() ) , allow_extra_keys=__lowerCamelCase )
return tuple(__lowerCamelCase )
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger()
@dataclass
class A__ :
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = 42
UpperCamelCase_ : Tuple = field(default_factory=__a )
UpperCamelCase_ : int = field(default_factory=__a )
def _lowerCAmelCase ( self : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : Tensor ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = len(list(m.modules() ) ) == 1 or isinstance(UpperCamelCase__ , nn.Convad ) or isinstance(UpperCamelCase__ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(UpperCamelCase__ )
def __call__( self : Optional[Any] , lowerCAmelCase__ : Tensor ) -> Any:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(UpperCamelCase__ )
[x.remove() for x in self.handles]
return self
@property
def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
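# Added standalone sketch of the hook-based tracing idea above: register a
# forward hook on every leaf module, run one input through, and collect the
# leaves in execution order (uses the torch.nn / Tensor imports at the top;
# never called anywhere in this script):
def _trace_leaves(model: nn.Module, x: Tensor) -> list:
    traced, handles = [], []
    for m in model.modules():
        if len(list(m.children())) == 0:  # leaf module
            handles.append(m.register_forward_hook(lambda mod, inp, out: traced.append(mod)))
    model(x)
    for h in handles:
        h.remove()
    return traced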
@dataclass
class A__ :
"""simple docstring"""
UpperCamelCase_ : str = 42
UpperCamelCase_ : Union[str, Any] = 42
UpperCamelCase_ : str = 1
UpperCamelCase_ : Tuple = field(default_factory=__a )
UpperCamelCase_ : List[Any] = field(default_factory=__a )
UpperCamelCase_ : int = True
def __call__( self : int , lowerCAmelCase__ : Tensor ) -> str:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = Tracker(self.dest )(UpperCamelCase__ ).parametrized
_UpperCAmelCase : Dict = Tracker(self.src )(UpperCamelCase__ ).parametrized
        _UpperCAmelCase : List[Any] = list(filter(lambda x : type(x ) not in self.src_skip , UpperCamelCase__ ) )
        _UpperCAmelCase : Optional[Any] = list(filter(lambda x : type(x ) not in self.dest_skip , UpperCamelCase__ ) )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ) and self.raise_if_mismatch:
raise Exception(
F"""Numbers of operations are different. Source module has {len(UpperCamelCase__ )} operations while"""
F""" destination module has {len(UpperCamelCase__ )}.""" )
for dest_m, src_m in zip(UpperCamelCase__ , UpperCamelCase__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F"""Transfered from={src_m} to={dest_m}""" )
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCAmelCase__ : nn.Module ) -> Dict:
"""simple docstring"""
super().__init__()
_UpperCAmelCase : List[Any] = []
# - get the stem
feature_blocks.append(("conv1", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("block" ), F"""Unexpected layer name {k}"""
_UpperCAmelCase : Optional[Any] = len(UpperCamelCase__ ) + 1
feature_blocks.append((F"""res{block_index}""", v) )
_UpperCAmelCase : List[str] = nn.ModuleDict(UpperCamelCase__ )
def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : Tensor ) -> Tuple:
"""simple docstring"""
return get_trunk_forward_outputs(
UpperCamelCase__ , out_feat_keys=UpperCamelCase__ , feature_blocks=self._feature_blocks , )
class A__ ( __a ):
"""simple docstring"""
def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : str ) -> str:
"""simple docstring"""
_UpperCAmelCase : Tuple = x.split("-" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : Union[str, Any] , lowerCAmelCase__ : str ) -> Callable[[], Tuple[nn.Module, Dict]]:
"""simple docstring"""
if x not in self:
_UpperCAmelCase : List[str] = self.convert_name_to_timm(UpperCamelCase__ )
_UpperCAmelCase : Union[str, Any] = partial(lambda: (timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__ ).eval(), None) )
else:
_UpperCAmelCase : List[str] = super().__getitem__(UpperCamelCase__ )
return val
class A__ ( __a ):
"""simple docstring"""
def __getitem__( self : Optional[Any] , lowerCAmelCase__ : str ) -> Callable[[], nn.Module]:
"""simple docstring"""
if "seer" in x and "in1k" not in x:
_UpperCAmelCase : int = RegNetModel
else:
_UpperCAmelCase : List[str] = RegNetForImageClassification
return val
def __UpperCAmelCase ( a_: Union[str, Any], a_: List[str], a_: List[Tuple[str, str]] ):
for from_key, to_key in keys:
_UpperCAmelCase : Dict = from_state_dict[from_key].clone()
print(f"""Copied key={from_key} to={to_key}""" )
return to_state_dict
def __UpperCAmelCase ( a_: str, a_: Callable[[], nn.Module], a_: Callable[[], nn.Module], a_: RegNetConfig, a_: Path, a_: bool = True, ):
print(f"""Converting {name}...""" )
with torch.no_grad():
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = from_model_func()
_UpperCAmelCase : List[str] = our_model_func(__UpperCamelCase ).eval()
_UpperCAmelCase : List[Any] = ModuleTransfer(src=__UpperCamelCase, dest=__UpperCamelCase, raise_if_mismatch=__UpperCamelCase )
_UpperCAmelCase : int = torch.randn((1, 3, 224, 224) )
module_transfer(__UpperCamelCase )
if from_state_dict is not None:
_UpperCAmelCase : List[Any] = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
_UpperCAmelCase : Optional[Any] = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
_UpperCAmelCase : Optional[int] = manually_copy_vissl_head(__UpperCamelCase, our_model.state_dict(), __UpperCamelCase )
our_model.load_state_dict(__UpperCamelCase )
_UpperCAmelCase : Optional[Any] = our_model(__UpperCamelCase, output_hidden_states=__UpperCamelCase )
_UpperCAmelCase : int = (
our_outputs.logits if isinstance(__UpperCamelCase, __UpperCamelCase ) else our_outputs.last_hidden_state
)
_UpperCAmelCase : Any = from_model(__UpperCamelCase )
_UpperCAmelCase : List[str] = from_output[-1] if type(__UpperCamelCase ) is list else from_output
    # Since we avoid config files here and the vissl SEER trunk has no head, just compare the last hidden state
if "seer" in name and "in1k" in name:
_UpperCAmelCase : Union[str, Any] = our_outputs.hidden_states[-1]
assert torch.allclose(__UpperCamelCase, __UpperCamelCase ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name, commit_message="Add model", use_temp_dir=__UpperCamelCase, )
_UpperCAmelCase : Union[str, Any] = 224 if "seer" not in name else 384
# we can use the convnext one
_UpperCAmelCase : List[Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=__UpperCamelCase )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name, commit_message="Add image processor", use_temp_dir=__UpperCamelCase, )
print(f"""Pushed {name}""" )
def __UpperCAmelCase ( a_: Path, a_: str = None, a_: bool = True ):
_UpperCAmelCase : Any = "imagenet-1k-id2label.json"
_UpperCAmelCase : List[str] = 1_000
_UpperCAmelCase : Tuple = (1, num_labels)
_UpperCAmelCase : Any = "huggingface/label-files"
_UpperCAmelCase : Optional[Any] = num_labels
_UpperCAmelCase : Tuple = json.load(open(cached_download(hf_hub_url(__UpperCamelCase, __UpperCamelCase, repo_type="dataset" ) ), "r" ) )
    _UpperCAmelCase : List[Any] = {int(k ): v for k, v in idalabel.items()}
_UpperCAmelCase : Optional[int] = idalabel
_UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
_UpperCAmelCase : Tuple = partial(__UpperCamelCase, num_labels=__UpperCamelCase, idalabel=__UpperCamelCase, labelaid=__UpperCamelCase )
_UpperCAmelCase : Union[str, Any] = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1_008], groups_width=48, layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1_360], groups_width=40, layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1_624], groups_width=56, layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1_920], groups_width=120, layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2_240], groups_width=112, layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2_048], groups_width=128, layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1_344, 2_520], groups_width=168, layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1_512], groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1_088], groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1_296], groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2_016], groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2_240], groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1_232, 3_024], groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1_392, 3_712], groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1_392, 3_712], groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1_968, 4_920], groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1_056, 2_904, 7_392], groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1_696, 2_544, 5_088], groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2_020, 4_040, 11_110, 28_280], groups_width=1_010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1_392, 3_712], groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1_968, 4_920], groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1_056, 2_904, 7_392], groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1_696, 2_544, 5_088], groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2_020, 4_040, 11_110, 28_280], groups_width=1_010 ),
}
_UpperCAmelCase : List[Any] = NameToOurModelFuncMap()
_UpperCAmelCase : List[Any] = NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        # `save_directory` comes from the enclosing convert_weights_and_push scope
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]
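    # A note on the layout assumed above: VISSL/ClassyVision checkpoints nest the weights as
    # files["classy_state_dict"]["base_model"]["model"] -> {"trunk": backbone weights, "heads": head weights}.
    # torch.hub.load_state_dict_from_url also caches the download under `model_dir`, so the
    # multi-gigabyte SEER checkpoints are only fetched once across repeated conversions.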
    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaaagf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1_010, w_0=1_744, w_a=620.83, w_m=2.52))
        ),
    )
    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaaagf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1_010, w_0=1_744, w_a=620.83, w_m=2.52))
        ),
    )
    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported regnet* architecture,'
            ' currently: regnetx-*, regnety-*. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 368 | '''simple docstring'''
import base64
def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))
def base85_decode(a85encoded: bytes) -> str:
    return base64.b85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded) | 17 | 0 |
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )
        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight"
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                self.out_projs.append(
                    self.add_weight(
                        shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                    )
                )
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight"
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b
    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
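    # Worked illustration of the gather above (hypothetical values): with
    # logprob = [[-0.1, -2.3], [-1.6, -0.2]] and target = [1, 0], the stacked
    # indices are [[0, 1], [1, 0]], so tf.gather_nd returns the per-example
    # target log-probabilities [-2.3, -1.6] in one vectorized lookup.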
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx
                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]
                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)
                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
        return out
| 87 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42 | 0 |
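# A minimal sketch of the lazy-import pattern used above (a simplification, not the
# exact `_LazyModule` implementation): the entry in sys.modules is replaced by a
# module proxy whose __getattr__ imports the real submodule on first attribute
# access, so `import transformers.models.xlm` stays cheap until a class is used.
import importlib
import types
class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # invert {submodule: [exported names]} into {name: submodule}
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr: str):
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(submodule, attr)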
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """simple docstring"""
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]
        input_features = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        input_features_truncated = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(input_features, input_features_truncated):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 223 |
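# Minimal usage sketch of the extractor under test (public `transformers` API; the
# silent audio input is illustrative):
import numpy as np
from transformers import WhisperFeatureExtractor
fe = WhisperFeatureExtractor()
audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
feats = fe(audio, sampling_rate=16_000, return_tensors="np").input_features
print(feats.shape)  # (1, 80, 3000): 80 mel bins, 30 s of padded frames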
def text_justification(word: str, max_width: int) -> list:
    """
    Greedily packs words into lines of exactly `max_width` characters, padding with
    spaces distributed from the left (fully justified, last line left-aligned).
    >>> text_justification("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 223 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1_5_3_6,
"""junnyu/roformer_chinese_base""": 1_5_3_6,
"""junnyu/roformer_chinese_char_small""": 5_1_2,
"""junnyu/roformer_chinese_char_base""": 5_1_2,
"""junnyu/roformer_small_discriminator""": 1_2_8,
"""junnyu/roformer_small_generator""": 1_2_8,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state
    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        """simple docstring"""
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
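# Usage sketch (mirrors the example in the public transformers docs for these
# checkpoints): the fast tokenizer swaps in the jieba-based pre-tokenizer, so
# Chinese text is split into words before WordPiece, e.g.
#   tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   tok.tokenize("今天天气非常好。")  # -> ['今', '天', '天', '气', '非常', '好', '。']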
| 197 | """simple docstring"""
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    '''simple docstring'''
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    '''simple docstring'''
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)
    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size) )
    return interpolated_func
def question_function(variable: int) -> int:
    '''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**1_0
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    '''simple docstring'''
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
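# Worked check from the Project Euler 101 statement: for u(n) = n**3 the bad OPs are
# the constant 1, the line 7n - 6 and the quadratic 6n**2 - 11n + 6, whose first
# incorrect terms (FITs) are 1, 15 and 58, so the total must be 74.
assert solution(lambda n: n**3, 3) == 1 + 15 + 58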
if __name__ == "__main__":
print(F"""{solution() = }""")
| 197 | 1 |
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    '''
    Return the nth "ugly number", i.e. the nth positive integer whose only prime
    factors are 2, 3 and 5. The sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...
    >>> ugly_numbers(1)
    1
    >>> ugly_numbers(10)
    12
    >>> ugly_numbers(200)
    16200
    '''
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F'''{ugly_numbers(2_0_0) = }''')
| 157 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    """simple docstring"""
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()
    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """simple docstring"""
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act)
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels
    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''')
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """simple docstring"""
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)
    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """simple docstring"""
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride), ResNetConvLayer(out_channels, out_channels, activation=None), )
        self.activation = ACT2FN[activation]
    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """simple docstring"""
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1), ResNetConvLayer(reduces_channels, reduces_channels, stride=stride), ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None), )
        self.activation = ACT2FN[activation]
    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """simple docstring"""
    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, ):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act), *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)], )
    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    """simple docstring"""
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], ) )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))
    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state, hidden_states=hidden_states, )
class ResNetPreTrainedModel(PreTrainedModel):
    """simple docstring"""
    config_class = ResNetConfig
    base_model_prefix = 'resnet'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True
    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='''fan_out''', nonlinearity='''relu''')
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
RESNET_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    'The bare ResNet model outputting raw features without any specific head on top.', RESNET_START_DOCSTRING, )
class ResNetModel(ResNetPreTrainedModel):
    """simple docstring"""
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality='''vision''', expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, )
@add_start_docstrings(
    '\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ', RESNET_START_DOCSTRING, )
class ResNetForImageClassification(ResNetPreTrainedModel):
    """simple docstring"""
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(), nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(), )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def forward(self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = '''single_label_classification'''
                else:
                    self.config.problem_type = '''multi_label_classification'''
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    '\n    ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n    ', RESNET_START_DOCSTRING, )
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    """simple docstring"""
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, )
| 157 | 1 |
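# End-to-end usage sketch for the classes above, via the public `transformers` API
# (the checkpoint comes from the docstring constants above; the image path is
# illustrative):
import torch
from PIL import Image
from transformers import AutoImageProcessor, ResNetForImageClassification
image = Image.open("cat.jpg")  # any RGB image
processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tiger cat"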
def __get_demo_graph(index):
'''simple docstring'''
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    '''
    Return the list of undirected bridges in `graph`, each as a (u, v) tuple with u < v.
    >>> sorted(compute_bridges(__get_demo_graph(0)))
    [(2, 3), (2, 5), (3, 4)]
    >>> compute_bridges(__get_demo_graph(3))
    []
    '''
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n
    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])
    bridges = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
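# Illustration of what "bridge" means: deleting a reported bridge disconnects the
# graph, while deleting a non-bridge edge (one that lies on a cycle) does not. In
# demo graph 0 the triangle edge (0, 1) is safe to remove, but (3, 4) is not.
demo = __get_demo_graph(0)
assert (3, 4) in compute_bridges(demo)
assert (0, 1) not in compute_bridges(demo)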
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    """simple docstring"""
    iam_client = boto3.client('iam')
    sagemaker_trust_policy = {
'Version': '2012-10-17',
'Statement': [
{'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}, 'Action': 'sts:AssumeRole'}
],
}
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2))
        policy_document = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': [
'sagemaker:*',
'ecr:GetDownloadUrlForLayer',
'ecr:BatchGetImage',
'ecr:BatchCheckLayerAvailability',
'ecr:GetAuthorizationToken',
'cloudwatch:PutMetricData',
'cloudwatch:GetMetricData',
'cloudwatch:GetMetricStatistics',
'cloudwatch:ListMetrics',
'logs:CreateLogGroup',
'logs:CreateLogStream',
'logs:DescribeLogStreams',
'logs:PutLogEvents',
'logs:GetLogEvents',
's3:CreateBucket',
's3:ListBucket',
's3:GetBucketLocation',
's3:GetObject',
's3:PutObject',
],
'Resource': '*',
}
],
}
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name, PolicyName=f"{role_name}_policy_permission", PolicyDocument=json.dumps(policy_document, indent=2), )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")
def _get_iam_role_arn(role_name):
    """simple docstring"""
    iam_client = boto3.client('iam')
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
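# The ARN returned above always has the fixed IAM format
# arn:aws:iam::<12-digit-account-id>:role/<role-name>; SageMaker assumes this role at
# job launch via the sts:AssumeRole trust policy attached in the helper above.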
def _lowerCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =_ask_options(
'How do you want to authorize?' , ['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '] , _UpperCamelCase , )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field('Enter your AWS Profile name: [default] ', default='default')
        os.environ['AWS_PROFILE'] = aws_profile
    else:
        print(
            'Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with '
            '`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`')
        aws_access_key_id = _ask_field('AWS Access Key ID: ')
        os.environ['AWS_ACCESS_KEY_ID'] = aws_access_key_id

        aws_secret_access_key = _ask_field('AWS Secret Access Key: ')
        os.environ['AWS_SECRET_ACCESS_KEY'] = aws_secret_access_key

    aws_region = _ask_field('Enter your AWS Region: [us-east-1] ', default='us-east-1')
    os.environ['AWS_DEFAULT_REGION'] = aws_region

    role_management = _ask_options(
        'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?',
        ['Provide IAM Role name', 'Create new IAM role using credentials'],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field('Enter your IAM role name: ')
    else:
        iam_role_name = 'accelerate_sagemaker_execution_role'
        print(f'Accelerate will create an IAM role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        'Do you want to use custom Docker image? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.',
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field('Enter your Docker image: ', lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.',
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ', lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        'Do you want to enable SageMaker metrics? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.',
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ', lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        'What is the distributed mode?', ['No distributed training', 'Data parallelism'], _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        'Do you wish to optimize your script with torch dynamo? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.',
    )
    if use_dynamo:
        prefix = 'dynamo_'
        dynamo_config[prefix + 'backend'] = _ask_options(
            'Which dynamo backend would you like to use?', [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2,
        )
        use_custom_options = _ask_field(
            'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.',
        )

        if use_custom_options:
            dynamo_config[prefix + 'mode'] = _ask_options(
                'Which mode do you want to use?', TORCH_DYNAMO_MODES, lambda x: TORCH_DYNAMO_MODES[int(x)], default='default',
            )
            dynamo_config[prefix + 'use_fullgraph'] = _ask_field(
                'Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.',
            )
            dynamo_config[prefix + 'use_dynamic'] = _ask_field(
                'Do you want to enable dynamic shape tracing? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.',
            )

    ec2_instance_query = 'Which EC2 instance type do you want to use for your training?'
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)])
    else:
        ec2_instance_query += ' [ml.p3.2xlarge]: '
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default='ml.p3.2xlarge')

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            'How many machines do you want to use? [1]: ', int, default=1,
        )

    mixed_precision = _ask_options(
        'Do you wish to use FP16 or BF16 (mixed precision)?', ['no', 'fp16', 'bf16', 'fp8'], _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.')

    return SageMakerConfig(
        image_uri=docker_image, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=distributed_type, use_cpu=False, dynamo_config=dynamo_config, ec2_instance_type=ec2_instance_type, profile=aws_profile, region=aws_region, iam_role_name=iam_role_name, mixed_precision=mixed_precision, num_machines=num_machines, sagemaker_inputs_file=sagemaker_inputs_file, sagemaker_metrics_file=sagemaker_metrics_file, )
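# For context, a minimal sketch of the `_convert_yes_no_to_bool` helper used by the
# prompts above -- an assumption inferred from the call sites, not necessarily the
# exact implementation:
#
#   def _convert_yes_no_to_bool(value: str) -> bool:
#       # Maps a case-insensitive "yes"/"no" answer to a boolean; any other input
#       # triggers the `error_message` re-prompt in `_ask_field`.
#       return {'yes': True, 'no': False}[value.lower()]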
| 47 | 0 |
"""simple docstring"""
def different_signs(num1: int, num2: int) -> bool:
    # Two integers have opposite signs exactly when their XOR is negative,
    # since XOR of the sign bits is 1 only when the signs differ.
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
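    # Quick sanity check of the XOR sign trick (added illustration):
    assert different_signs(3, -7) is True
    assert different_signs(3, 7) is False
    assert different_signs(-3, -7) is False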
| 360 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    # Removes segments from a dotted path: positive values shave the first
    # segments, negative values shave the last segments.
    if n_shave_prefix_segments >= 0:
        return '.'.join(path.split('.')[n_shave_prefix_segments:])
    else:
        return '.'.join(path.split('.')[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('in_layers.0', 'norm1')
        new_item = new_item.replace('in_layers.2', 'conv1')
        new_item = new_item.replace('out_layers.0', 'norm2')
        new_item = new_item.replace('out_layers.3', 'conv2')
        new_item = new_item.replace('emb_layers.1', 'time_emb_proj')
        new_item = new_item.replace('skip_connection', 'conv_shortcut')
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({'old': old_item, 'new': new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('norm.weight', 'group_norm.weight')
        new_item = new_item.replace('norm.bias', 'group_norm.bias')
        new_item = new_item.replace('proj_out.weight', 'proj_attn.weight')
        new_item = new_item.replace('proj_out.bias', 'proj_attn.bias')
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({'old': old_item, 'new': new_item})
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config['num_head_channels'] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map['query']] = query.reshape(target_shape)
            checkpoint[path_map['key']] = key.reshape(target_shape)
            checkpoint[path_map['value']] = value.reshape(target_shape)

    for path in paths:
        new_path = path['new']

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace('middle_block.0', 'mid_block.resnets.0')
        new_path = new_path.replace('middle_block.1', 'mid_block.attentions.0')
        new_path = new_path.replace('middle_block.2', 'mid_block.resnets.1')

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement['old'], replacement['new'])

        # proj_attn.weight has to be converted from conv 1D to linear
        if 'proj_attn.weight' in new_path:
            checkpoint[new_path] = old_checkpoint[path['old']][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path['old']]
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}

    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']

    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']

    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
__UpperCamelCase = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
__UpperCamelCase = {
layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
for layer_id in range(snake_case )
}
# Retrieves the keys for the middle blocks only
__UpperCamelCase = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
__UpperCamelCase = {
layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
for layer_id in range(snake_case )
}
# Retrieves the keys for the output blocks only
__UpperCamelCase = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
__UpperCamelCase = {
layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
for layer_id in range(snake_case )
}
for i in range(1 , snake_case ):
__UpperCamelCase = (i - 1) // (config['num_res_blocks'] + 1)
__UpperCamelCase = (i - 1) % (config['num_res_blocks'] + 1)
__UpperCamelCase = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
__UpperCamelCase = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]
if f'input_blocks.{i}.0.op.weight' in checkpoint:
__UpperCamelCase = checkpoint[
f'input_blocks.{i}.0.op.weight'
]
__UpperCamelCase = checkpoint[
f'input_blocks.{i}.0.op.bias'
]
continue
__UpperCamelCase = renew_resnet_paths(snake_case )
__UpperCamelCase = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
__UpperCamelCase = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
snake_case , snake_case , snake_case , additional_replacements=[meta_path, resnet_op] , config=snake_case )
if len(snake_case ):
__UpperCamelCase = renew_attention_paths(snake_case )
__UpperCamelCase = {
'old': f'input_blocks.{i}.1',
'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
}
__UpperCamelCase = {
f'input_blocks.{i}.1.qkv.bias': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'input_blocks.{i}.1.qkv.weight': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
snake_case , snake_case , snake_case , additional_replacements=[meta_path] , attention_paths_to_split=snake_case , config=snake_case , )
__UpperCamelCase = middle_blocks[0]
__UpperCamelCase = middle_blocks[1]
__UpperCamelCase = middle_blocks[2]
__UpperCamelCase = renew_resnet_paths(snake_case )
assign_to_checkpoint(snake_case , snake_case , snake_case , config=snake_case )
__UpperCamelCase = renew_resnet_paths(snake_case )
assign_to_checkpoint(snake_case , snake_case , snake_case , config=snake_case )
__UpperCamelCase = renew_attention_paths(snake_case )
__UpperCamelCase = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
snake_case , snake_case , snake_case , attention_paths_to_split=snake_case , config=snake_case )
for i in range(snake_case ):
__UpperCamelCase = i // (config['num_res_blocks'] + 1)
__UpperCamelCase = i % (config['num_res_blocks'] + 1)
__UpperCamelCase = [shave_segments(snake_case , 2 ) for name in output_blocks[i]]
__UpperCamelCase = {}
for layer in output_block_layers:
__UpperCamelCase , __UpperCamelCase = layer.split('.' )[0], shave_segments(snake_case , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(snake_case )
else:
__UpperCamelCase = [layer_name]
if len(snake_case ) > 1:
__UpperCamelCase = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
__UpperCamelCase = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]
__UpperCamelCase = renew_resnet_paths(snake_case )
__UpperCamelCase = renew_resnet_paths(snake_case )
__UpperCamelCase = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
assign_to_checkpoint(snake_case , snake_case , snake_case , additional_replacements=[meta_path] , config=snake_case )
if ["conv.weight", "conv.bias"] in output_block_list.values():
__UpperCamelCase = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
__UpperCamelCase = checkpoint[
f'output_blocks.{i}.{index}.conv.weight'
]
__UpperCamelCase = checkpoint[
f'output_blocks.{i}.{index}.conv.bias'
]
# Clear attentions as they have been attributed above.
if len(snake_case ) == 2:
__UpperCamelCase = []
if len(snake_case ):
__UpperCamelCase = renew_attention_paths(snake_case )
__UpperCamelCase = {
'old': f'output_blocks.{i}.1',
'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
}
__UpperCamelCase = {
f'output_blocks.{i}.1.qkv.bias': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'output_blocks.{i}.1.qkv.weight': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
snake_case , snake_case , snake_case , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=snake_case , )
else:
__UpperCamelCase = renew_resnet_paths(snake_case , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
__UpperCamelCase = '.'.join(['output_blocks', str(snake_case ), path['old']] )
__UpperCamelCase = '.'.join(['up_blocks', str(snake_case ), 'resnets', str(snake_case ), path['new']] )
__UpperCamelCase = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
UpperCamelCase : Dict = parser.parse_args()
UpperCamelCase : Optional[Any] = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
UpperCamelCase : int = json.loads(f.read())
UpperCamelCase : Dict = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
UpperCamelCase : Optional[Any] = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
UpperCamelCase : List[Any] = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
UpperCamelCase : Optional[int] = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
UpperCamelCase : Dict = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
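# Hypothetical invocation of this converter (script file name and all paths are
# placeholders, not taken from the original source):
#   python convert_ldm_original_checkpoint_to_diffusers.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_file ./ldm/config.json \
#       --dump_path ./converted_model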
| 263 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    '''simple docstring'''

    default_checkpoint = 'dandelin/vilt-b32-finetuned-vqa'
    description = (
        'This is a tool that answers a question about an image. It takes an input named `image` which should be the '
        'image containing the information, as well as a `question` which should be the question in English. It '
        'returns a text that is the answer to the question.'
    )
    name = 'image_qa'
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ['image', 'text']
    outputs = ['text']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['vision'])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors='pt')

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 8 |
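# Hedged usage sketch for the image question-answering tool above; the image path
# is a placeholder, and downloading the checkpoint requires network access:
#
#   from PIL import Image
#   tool = ImageQuestionAnsweringTool()
#   image = Image.open('photo.jpg')
#   print(tool(image, 'How many people are in the photo?'))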
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
    'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_speech_to_text'] = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_speech_to_text'] = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_speech_to_text'] = [
        'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFSpeech2TextForConditionalGeneration',
        'TFSpeech2TextModel',
        'TFSpeech2TextPreTrainedModel',
    ]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_to_text'] = [
        'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Speech2TextForConditionalGeneration',
        'Speech2TextModel',
        'Speech2TextPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
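# As background, a rough illustration of the lazy-import idea behind `_LazyModule`
# (a simplified sketch, not the real implementation in `transformers.utils`):
#
#   import importlib
#   import types
#
#   class SimpleLazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._import_structure = import_structure
#
#       def __getattr__(self, attr):
#           # Import the defining submodule only on first attribute access.
#           for submodule, symbols in self._import_structure.items():
#               if attr in symbols:
#                   module = importlib.import_module(f'.{submodule}', self.__name__)
#                   return getattr(module, attr)
#           raise AttributeError(f'module {self.__name__!r} has no attribute {attr!r}')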
| 155 | 0 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """Mark the function with the key code so it can be handled in the register."""

    def decorator(func):
        handle = getattr(func, 'handle_key', [])
        handle += [key]
        setattr(func, 'handle_key', handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """Mark the function with the key codes so it can be handled in the register."""

    def decorator(func):
        handle = getattr(func, 'handle_key', [])
        handle += keys
        setattr(func, 'handle_key', handle)
        return func

    return decorator


class KeyHandler(type):
    """Metaclass that collects the marked methods into a `key_handler` dispatch table."""

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, 'key_handler'):
            setattr(new_cls, 'key_handler', {})
        setattr(new_cls, 'handle_input', KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, 'handle_key', [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the handler for the pressed key, if one was registered."""
        char = get_character()
        if char != KEYMAP['undefined']:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the KeyHandler metaclass to an existing class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
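# Hypothetical usage sketch for the helpers above. `handle_input()` blocks until a
# character is read from stdin, so this is illustrative only:
#
#   @register
#   class Menu:
#       @mark(ord('q'))  # key code for "q"
#       def quit(self):
#           return 'quit'
#
#   menu = Menu()
#   result = menu.handle_input()  # returns 'quit' once the user presses "q"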
| 238 |
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a: list) -> None:
    # Pigeonhole sort runs in O(n + k) time, where k = max - min + 1,
    # so it is only suitable when the value range is small.
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print('Sorted order is:', ' '.join(str(x) for x in a))
if __name__ == "__main__":
main()
| 238 | 1 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.')

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + 'Fast')}

    logger.info(f'Loading tokenizer classes: {tokenizer_names}')

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}')

        for checkpoint in checkpoint_names:
            logger.info(f'Loading {tokenizer_class.__class__.__name__} {checkpoint}')

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}')

            # For organization names we create sub-directories
            if '/' in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split('/')
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == '/':
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f'=> File names {file_names}')

            for file_name in file_names:
                if not file_name.endswith('tokenizer.json'):
                    os.remove(file_name)
                    logger.info(f'=> removing {file_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
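# Hypothetical invocation (script file name and argument values are placeholders):
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers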
| 50 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'FlavaImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if 'feature_extractor' in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
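# Hedged usage sketch; `facebook/flava-full` is the public FLAVA checkpoint, and
# loading it requires network access (the image path is a placeholder):
#
#   from PIL import Image
#   processor = FlavaProcessor.from_pretrained('facebook/flava-full')
#   inputs = processor(images=Image.open('cat.png'), text=['a photo of a cat'],
#                      return_tensors='pt', padding=True)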
| 51 | 0 |
from manim import *
class A__ ( snake_case__ ):
"""simple docstring"""
def a_ ( self ):
snake_case = Rectangle(height=0.5 , width=0.5 )
snake_case = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case = [mem.copy() for i in range(6 )]
snake_case = [mem.copy() for i in range(6 )]
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = VGroup(__snake_case , __snake_case ).arrange(__snake_case , buff=0 )
snake_case = Text('''CPU''' , font_size=2_4 )
snake_case = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
snake_case = [mem.copy() for i in range(1 )]
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = Text('''GPU''' , font_size=2_4 )
snake_case = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
gpu.align_to(__snake_case , __snake_case )
gpu.set_x(gpu.get_x() - 1 )
self.add(__snake_case )
snake_case = [mem.copy() for i in range(6 )]
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = Text('''Model''' , font_size=2_4 )
snake_case = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.play(
Create(__snake_case , run_time=1 ) , Create(__snake_case , run_time=1 ) , Create(__snake_case , run_time=1 ) , )
snake_case = MarkupText(
F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=2_4 , )
snake_case = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case , run_time=2.5 ) , Write(__snake_case ) , Write(__snake_case ) )
self.add(__snake_case )
snake_case = []
snake_case = []
snake_case = []
for i, rect in enumerate(__snake_case ):
snake_case = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__snake_case , opacity=0.7 )
cpu_target.move_to(__snake_case )
cpu_target.generate_target()
snake_case = 0.46 / 4
snake_case = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__snake_case )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=__snake_case , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=__snake_case , buff=0.0 )
cpu_targs.append(__snake_case )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(__snake_case ) )
second_animations.append(MoveToTarget(__snake_case , run_time=1.5 ) )
self.play(*__snake_case )
self.play(*__snake_case )
self.wait()
| 368 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 213 | 0 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    '''Generates a tuple of dummy DataLoaders to test with.'''

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    '''Trains for `num_epochs`, returning the random numbers drawn each step.'''
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


class DummyModel(nn.Module):
    '''Simple model: y = a * x + b.'''

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowerCAmelCase__ : Optional[int] = DummyModel()
lowerCAmelCase__ : Tuple = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = dummy_dataloaders()
lowerCAmelCase__ : Dict = ProjectConfiguration(total_limit=1 ,project_dir=__lowerCamelCase ,automatic_checkpoint_naming=__lowerCamelCase )
# Train baseline
lowerCAmelCase__ : Optional[Any] = Accelerator(project_config=__lowerCamelCase )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = accelerator.prepare(
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) ,1 )
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowerCAmelCase__ : Union[str, Any] = DummyModel()
lowerCAmelCase__ : Dict = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = dummy_dataloaders()
# Train baseline
lowerCAmelCase__ : int = Accelerator()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = accelerator.prepare(
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
# Save initial
lowerCAmelCase__ : str = os.path.join(__lowerCamelCase ,'''initial''' )
accelerator.save_state(__lowerCamelCase )
((lowerCAmelCase__) , (lowerCAmelCase__)) : Dict = model.a.item(), model.b.item()
lowerCAmelCase__ : Any = optimizer.state_dict()
lowerCAmelCase__ : str = train(3 ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
((lowerCAmelCase__) , (lowerCAmelCase__)) : str = model.a.item(), model.b.item()
lowerCAmelCase__ : List[Any] = optimizer.state_dict()
# Train partially
set_seed(42 )
lowerCAmelCase__ : Dict = DummyModel()
lowerCAmelCase__ : int = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
lowerCAmelCase__ , lowerCAmelCase__ : str = dummy_dataloaders()
lowerCAmelCase__ : Dict = Accelerator()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = accelerator.prepare(
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
accelerator.load_state(__lowerCamelCase )
((lowerCAmelCase__) , (lowerCAmelCase__)) : Dict = model.a.item(), model.b.item()
lowerCAmelCase__ : List[Any] = optimizer.state_dict()
self.assertEqual(__lowerCamelCase ,__lowerCamelCase )
self.assertEqual(__lowerCamelCase ,__lowerCamelCase )
self.assertEqual(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : List[Any] = train(2 ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
# Save everything
lowerCAmelCase__ : int = os.path.join(__lowerCamelCase ,'''checkpoint''' )
accelerator.save_state(__lowerCamelCase )
# Load everything back in and make sure all states work
accelerator.load_state(__lowerCamelCase )
test_rands += train(1 ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
((lowerCAmelCase__) , (lowerCAmelCase__)) : str = model.a.item(), model.b.item()
lowerCAmelCase__ : Optional[Any] = optimizer.state_dict()
self.assertEqual(__lowerCamelCase ,__lowerCamelCase )
self.assertEqual(__lowerCamelCase ,__lowerCamelCase )
self.assertEqual(__lowerCamelCase ,__lowerCamelCase )
self.assertEqual(__lowerCamelCase ,__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowerCAmelCase__ : Optional[Any] = DummyModel()
lowerCAmelCase__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = dummy_dataloaders()
lowerCAmelCase__ : List[str] = ProjectConfiguration(automatic_checkpoint_naming=__lowerCamelCase )
# Train baseline
lowerCAmelCase__ : Dict = Accelerator(project_dir=__lowerCamelCase ,project_config=__lowerCamelCase )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = accelerator.prepare(
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
# Save initial
accelerator.save_state()
((lowerCAmelCase__) , (lowerCAmelCase__)) : List[Any] = model.a.item(), model.b.item()
lowerCAmelCase__ : Dict = optimizer.state_dict()
lowerCAmelCase__ : Optional[Any] = train(3 ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
((lowerCAmelCase__) , (lowerCAmelCase__)) : Optional[Any] = model.a.item(), model.b.item()
lowerCAmelCase__ : str = optimizer.state_dict()
# Train partially
set_seed(42 )
lowerCAmelCase__ : str = DummyModel()
lowerCAmelCase__ : List[Any] = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = dummy_dataloaders()
lowerCAmelCase__ : Tuple = ProjectConfiguration(iteration=1 ,automatic_checkpoint_naming=__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = Accelerator(project_dir=__lowerCamelCase ,project_config=__lowerCamelCase )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = accelerator.prepare(
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
accelerator.load_state(os.path.join(__lowerCamelCase ,'''checkpoints''' ,'''checkpoint_0''' ) )
((lowerCAmelCase__) , (lowerCAmelCase__)) : Any = model.a.item(), model.b.item()
lowerCAmelCase__ : Tuple = optimizer.state_dict()
self.assertEqual(__lowerCamelCase ,__lowerCamelCase )
self.assertEqual(__lowerCamelCase ,__lowerCamelCase )
self.assertEqual(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : List[str] = train(2 ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__lowerCamelCase ,'''checkpoints''' ,'''checkpoint_1''' ) )
test_rands += train(1 ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
((lowerCAmelCase__) , (lowerCAmelCase__)) : List[str] = model.a.item(), model.b.item()
lowerCAmelCase__ : Optional[Any] = optimizer.state_dict()
self.assertEqual(__lowerCamelCase ,__lowerCamelCase )
self.assertEqual(__lowerCamelCase ,__lowerCamelCase )
self.assertEqual(__lowerCamelCase ,__lowerCamelCase )
self.assertEqual(__lowerCamelCase ,__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = torch.tensor([1, 2, 3] )
lowerCAmelCase__ : int = torch.tensor([2, 3, 4] )
lowerCAmelCase__ : Tuple = DummyModel()
lowerCAmelCase__ : Tuple = torch.optim.Adam(net.parameters() )
lowerCAmelCase__ : Any = Accelerator()
with self.assertRaises(__lowerCamelCase ) as ve:
accelerator.register_for_checkpointing(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : int = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowerCAmelCase__ : Dict = DummyModel()
lowerCAmelCase__ : Optional[int] = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
lowerCAmelCase__ : str = torch.optim.lr_scheduler.StepLR(__lowerCamelCase ,step_size=1 ,gamma=0.99 )
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = dummy_dataloaders()
lowerCAmelCase__ : int = ProjectConfiguration(automatic_checkpoint_naming=__lowerCamelCase )
# Train baseline
lowerCAmelCase__ : Tuple = Accelerator(project_dir=__lowerCamelCase ,project_config=__lowerCamelCase )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Dict = accelerator.prepare(
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
# Save initial
accelerator.save_state()
lowerCAmelCase__ : Any = scheduler.state_dict()
train(3 ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
self.assertNotEqual(__lowerCamelCase ,scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__lowerCamelCase ,'''checkpoints''' ,'''checkpoint_0''' ) )
self.assertEqual(__lowerCamelCase ,scheduler.state_dict() )
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowerCAmelCase__ : str = DummyModel()
lowerCAmelCase__ : Dict = ProjectConfiguration(automatic_checkpoint_naming=__lowerCamelCase ,total_limit=2 )
# Train baseline
lowerCAmelCase__ : Optional[int] = Accelerator(project_dir=__lowerCamelCase ,project_config=__lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = accelerator.prepare(__lowerCamelCase )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(__lowerCamelCase ,'''checkpoints''' ,'''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase ,'''checkpoints''' ,'''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase ,'''checkpoints''' ,'''checkpoint_10''' ) ) )
@require_cuda
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : str = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() )
if __name__ == "__main__":
    savedir = '/tmp/accelerate/state_checkpointing'
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
    assert (
        param_device.type == torch.device('cpu').type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
        accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
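    # Compact reference for the save/restore pattern exercised above (a hedged
    # sketch; assumes `model` and `optimizer` already exist):
    #
    #   config = ProjectConfiguration(project_dir='runs/demo', automatic_checkpoint_naming=True)
    #   accelerator = Accelerator(project_config=config)
    #   model, optimizer = accelerator.prepare(model, optimizer)
    #   accelerator.save_state()  # writes runs/demo/checkpoints/checkpoint_0
    #   accelerator.load_state('runs/demo/checkpoints/checkpoint_0')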
| 129 |
import math
def proth(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers come in "blocks" whose length doubles each time;
        # work out how many blocks are needed to reach `number`.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
| 129 | 1 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
"""simple docstring"""
    mode = 'token-classification'
    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module('tasks')
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
                f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)
def __lowerCAmelCase ( self , **_lowerCamelCase ) ->List[Any]:
return self.model(**_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Tuple:
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
SCREAMING_SNAKE_CASE : Union[str, Any] = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
            )  # XLM and RoBERTa don't use token_type_ids
SCREAMING_SNAKE_CASE : int = self(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : List[str] = self.hparams
for mode in ["train", "dev", "test"]:
SCREAMING_SNAKE_CASE : Any = self._feature_file(_lowerCamelCase )
if os.path.exists(_lowerCamelCase ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(_lowerCamelCase )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
SCREAMING_SNAKE_CASE : Dict = self.token_classification_task.read_examples_from_file(args.data_dir , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = self.token_classification_task.convert_examples_to_features(
_lowerCamelCase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=_lowerCamelCase , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , _lowerCamelCase )
torch.save(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False ) ->DataLoader:
SCREAMING_SNAKE_CASE : Optional[Any] = self._feature_file(_lowerCamelCase )
logger.info('''Loading features from cached file %s''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = torch.load(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
SCREAMING_SNAKE_CASE : Dict = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , batch_size=_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Optional[int]:
"""Compute validation""" ""
SCREAMING_SNAKE_CASE : List[Any] = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
SCREAMING_SNAKE_CASE : Optional[Any] = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
SCREAMING_SNAKE_CASE : Optional[int] = self(**_lowerCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = outputs[:2]
SCREAMING_SNAKE_CASE : Optional[Any] = logits.detach().cpu().numpy()
SCREAMING_SNAKE_CASE : Optional[int] = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[int] = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
SCREAMING_SNAKE_CASE : Union[str, Any] = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
SCREAMING_SNAKE_CASE : str = np.argmax(_lowerCamelCase , axis=2 )
SCREAMING_SNAKE_CASE : Tuple = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
SCREAMING_SNAKE_CASE : Any = dict(enumerate(self.labels ) )
SCREAMING_SNAKE_CASE : int = [[] for _ in range(out_label_ids.shape[0] )]
SCREAMING_SNAKE_CASE : Optional[Any] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
SCREAMING_SNAKE_CASE : List[Any] = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(_lowerCamelCase , _lowerCamelCase ),
'''precision''': precision_score(_lowerCamelCase , _lowerCamelCase ),
'''recall''': recall_score(_lowerCamelCase , _lowerCamelCase ),
            '''f1''': f1_score(_lowerCamelCase , _lowerCamelCase ),
}
SCREAMING_SNAKE_CASE : Optional[int] = dict(results.items() )
SCREAMING_SNAKE_CASE : Optional[int] = results
return ret, preds_list, out_label_list
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Tuple:
# when stable
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self._eval_end(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Tuple:
# updating to test_epoch_end instead of deprecated test_end
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self._eval_end(_lowerCamelCase )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
SCREAMING_SNAKE_CASE : Any = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __lowerCAmelCase ( _lowerCamelCase , _lowerCamelCase ) ->Dict:
# Add NER specific options
BaseTransformer.add_model_specific_args(_lowerCamelCase , _lowerCamelCase )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=_lowerCamelCase , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=_lowerCamelCase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=_lowerCamelCase , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=_lowerCamelCase , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
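# Hedged standalone sketch (not part of the original script): the pad-label
# alignment performed in _eval_end above, demonstrated on toy arrays. The -100
# pad id mirrors PyTorch's CrossEntropyLoss ignore_index convention and is an
# assumption here; the original reads it from self.pad_token_label_id.
def _demo_align_predictions():
    pad_token_label_id = -100
    label_map = {0: "O", 1: "B-PER", 2: "I-PER"}
    out_label_ids = np.array([[0, 1, pad_token_label_id], [2, pad_token_label_id, pad_token_label_id]])
    preds = np.array([[0, 1, 1], [2, 0, 0]])
    out_label_list = [[] for _ in range(out_label_ids.shape[0])]
    preds_list = [[] for _ in range(out_label_ids.shape[0])]
    for i in range(out_label_ids.shape[0]):
        for j in range(out_label_ids.shape[1]):
            # padded positions carry no gold label, so they are skipped entirely
            if out_label_ids[i, j] != pad_token_label_id:
                out_label_list[i].append(label_map[out_label_ids[i, j]])
                preds_list[i].append(label_map[preds[i, j]])
    return out_label_list, preds_list  # ([['O', 'B-PER'], ['I-PER']], [['O', 'B-PER'], ['I-PER']])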
| 19 |
import math
def check_partition_perfect( positive_integer ):
    """simple docstring"""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )
def solution( max_proportion = 1 / 12_345 ):
    """simple docstring"""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            if check_partition_perfect(partition_candidate ):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate )
        integer += 1
if __name__ == "__main__":
print(F"{solution() = }")
| 19 | 1 |
'''simple docstring'''
from __future__ import annotations
def slowsort( sequence : list , start : int | None = None , end : int | None = None ):
    """simple docstring"""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
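    # Hedged usage sketch (not part of the original file): slowsort sorts in place.
    data = [5, 3, 8, 1]
    slowsort(data)
    assert data == [1, 3, 5, 8]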
| 93 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__( self , parent , batch_size=2 , seq_length=8 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=16 , num_hidden_layers=5 , num_attention_heads=2 , intermediate_size=36 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def lowerCamelCase ( self ):
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self ):
'''simple docstring'''
return MraConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
def lowerCamelCase ( self ):
'''simple docstring'''
        config = self.get_config()
        config.vocab_size = 300
return config
def lowerCamelCase ( self ):
'''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = MraModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = True
__lowerCamelCase = MraModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = MraForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = MraForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = MraForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = MraForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_choices
__lowerCamelCase = MraForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class MraModelTest( ModelTesterMixin , unittest.TestCase ):
lowerCAmelCase__ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = ()
def lowerCamelCase ( self ):
'''simple docstring'''
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MraConfig , hidden_size=37 )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def lowerCamelCase ( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def lowerCamelCase ( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def lowerCamelCase ( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def lowerCamelCase ( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def lowerCamelCase ( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def lowerCamelCase ( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason='''MRA does not output attentions''' )
def lowerCamelCase ( self ):
'''simple docstring'''
return
@require_torch
class MraModelIntegrationTest( unittest.TestCase ):
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' )
__lowerCamelCase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__lowerCamelCase = model(__UpperCAmelCase )[0]
__lowerCamelCase = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , __UpperCAmelCase )
__lowerCamelCase = torch.tensor(
[[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' )
__lowerCamelCase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__lowerCamelCase = model(__UpperCAmelCase )[0]
__lowerCamelCase = 50265
__lowerCamelCase = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , __UpperCAmelCase )
__lowerCamelCase = torch.tensor(
[[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' )
__lowerCamelCase = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
__lowerCamelCase = model(__UpperCAmelCase )[0]
__lowerCamelCase = 50265
__lowerCamelCase = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , __UpperCAmelCase )
__lowerCamelCase = torch.tensor(
[[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
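if __name__ == "__main__":
    # Hedged usage sketch (not part of the original test file): the same check the
    # first @slow integration test above performs, runnable by hand. It requires
    # network access to download the checkpoint and assumes torch is installed.
    model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
    input_ids = torch.arange(256).unsqueeze(0)
    with torch.no_grad():
        hidden_states = model(input_ids)[0]
    print(hidden_states.shape)  # expected: torch.Size([1, 256, 768])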
| 330 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_vivit""": ["""VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VivitConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ["""VivitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vivit"""] = [
"""VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VivitModel""",
"""VivitPreTrainedModel""",
"""VivitForVideoClassification""",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
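# Hedged illustration (not part of the original file): a minimal stand-in for
# the lazy-module pattern above -- submodules are only imported on first
# attribute access. Names here are illustrative, not the real transformers
# implementation.
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported class name to the submodule that defines it.
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        # Import the owning submodule lazily, then pull the attribute off it.
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)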
| 67 |
'''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch( gpt2_checkpoint_path : str , gpt2_config_file : str , pytorch_dump_folder_path : str ) -> Any:
    """simple docstring"""
    # Construct the model config
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file )
    model = GPT2Model(config )
    # Load weights from numpy
    load_tf_weights_in_gpt2(model , config , gpt2_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
_lowerCamelCase = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
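# Hedged usage sketch (not part of the original script): a typical invocation.
# The script filename and both paths are placeholders.
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output_dir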
| 67 | 1 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def simple_accuracy( preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(simple_accuracy , '''sklearn''' )
    return (preds == labels).mean()
def acc_and_f1( preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(acc_and_f1 , '''sklearn''' )
    acc = simple_accuracy(preds , labels )
    f1 = f1_score(y_true=labels , y_pred=preds )
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman( preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(pearson_and_spearman , '''sklearn''' )
    pearson_corr = pearsonr(preds , labels )[0]
    spearman_corr = spearmanr(preds , labels )[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics( task_name , preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(glue_compute_metrics , '''sklearn''' )
    assert len(preds ) == len(labels ), F'''Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}'''
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels , preds )}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "mrpc":
        return acc_and_f1(preds , labels )
    elif task_name == "sts-b":
        return pearson_and_spearman(preds , labels )
    elif task_name == "qqp":
        return acc_and_f1(preds , labels )
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds , labels )}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds , labels )}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
def xnli_compute_metrics( task_name , preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(xnli_compute_metrics , '''sklearn''' )
    if len(preds ) != len(labels ):
        raise ValueError(F'''Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}''' )
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
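if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): MRPC-style metrics on
    # toy arrays. This only runs inside the transformers package (the module uses
    # relative imports) and requires scikit-learn and scipy.
    import numpy as np

    preds = np.array([1, 0, 1, 1])
    labels = np.array([1, 0, 0, 1])
    print(glue_compute_metrics("mrpc", preds, labels))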
| 9 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main():
    print('Making key files...' )
    make_key_files('rsa' , 1_0_2_4 )
    print('Key files generation successful.' )
def generate_key( key_size : int ):
    print('Generating prime p...' )
    p = rabinMiller.generate_large_prime(key_size )
    print('Generating prime q...' )
    q = rabinMiller.generate_large_prime(key_size )
    n = p * q
    print('Generating e that is relatively prime to (p - 1) * (q - 1)...' )
    while True:
        e = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
        if cryptoMath.gcd(e , (p - 1) * (q - 1) ) == 1:
            break
    print('Calculating d that is mod inverse of e...' )
    d = cryptoMath.find_mod_inverse(e , (p - 1) * (q - 1) )
    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def make_key_files( name : str , key_size : int ):
    if os.path.exists(F'{name}_pubkey.txt' ) or os.path.exists(F'{name}_privkey.txt' ):
        print('\nWARNING:' )
        print(
            F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            'Use a different name or delete these files and re-run this program.' )
        sys.exit()
    public_key, private_key = generate_key(key_size )
    print(F'\nWriting public key to file {name}_pubkey.txt...' )
    with open(F'{name}_pubkey.txt' , 'w' ) as out_file:
        out_file.write(F'{key_size},{public_key[0]},{public_key[1]}' )
    print(F'Writing private key to file {name}_privkey.txt...' )
    with open(F'{name}_privkey.txt' , 'w' ) as out_file:
        out_file.write(F'{key_size},{private_key[0]},{private_key[1]}' )
if __name__ == "__main__":
main()
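def _toy_rsa_round_trip() -> bool:
    # Hedged illustration (not part of the original script): an RSA round-trip
    # with the classic textbook toy key p=61, q=53, giving n=3233, phi=3120,
    # e=17, d=2753 (d is the modular inverse of e mod phi). Raising to d mod n
    # undoes raising to e mod n.
    n, e, d = 3233, 17, 2753
    message = 65
    cipher = pow(message, e, n)  # 2790
    return pow(cipher, d, n) == message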
| 240 | 0 |
"""simple docstring"""
import math
def fx( x : float , a : float ) -> float:
    """simple docstring"""
    return math.pow(x , 2 ) - a
def fx_derivative( x : float ) -> float:
    """simple docstring"""
    return 2 * x
def get_initial_point( a : float ) -> float:
    """simple docstring"""
    start = 2.0
    while start <= a:
        start = math.pow(start , 2 )
    return start
def square_root_iterative( a : float , max_iter : int = 9_9_9_9 , tolerance : float = 0.00_00_00_00_00_00_01 ) -> float:
    """simple docstring"""
    if a < 0:
        raise ValueError('math domain error' )
    value = get_initial_point(a )
    for _ in range(max_iter ):
        prev_value = value
        value = value - fx(value , a ) / fx_derivative(value )
        if abs(prev_value - value ) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
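    # Hedged usage sketch (not part of the original file): Newton's update
    # x_next = x - (x**2 - a) / (2 * x) converges quadratically to sqrt(a).
    print(square_root_iterative(2))  # approx. 1.4142135623730951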
| 371 | """simple docstring"""
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception ):
    """simple docstring"""
    pass
class Node:
    """simple docstring"""
    def __init__( self , data ):
        """simple docstring"""
        self.data = data
        self.next_node = None
    def __iter__( self ):
        """simple docstring"""
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node )
            yield node.data
            node = node.next_node
    @property
    def has_loop( self ):
        """simple docstring"""
        try:
            list(self )
            return False
        except ContainsLoopError:
            return True
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True
    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False
    root_node = Node(1)
print(root_node.has_loop) # False
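def _has_loop_floyd(head: Node) -> bool:
    # Hedged alternative sketch (not part of the original file): Floyd's
    # tortoise-and-hare detects a cycle in O(1) extra space, unlike the
    # visited-list scan used by the has_loop property above.
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False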
| 149 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_focalnet'''] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 70 |
import os
import numpy
import onnx
def _is_equal_tensor_proto( a , b ):
    '''simple docstring'''
    name_a = a.name
    name_b = b.name
    # clear the names so the comparison only looks at the tensor contents
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with( node_proto , name , new_name ):
    '''simple docstring'''
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with( graph_proto , name , new_name ):
    '''simple docstring'''
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model( model , model_without_ext , ind_to_replace ):
    '''simple docstring'''
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers( onnx_file_path ):
    '''simple docstring'''
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ' , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    new_model = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder , new_model )
    onnx.save(model , new_model )
    return new_model
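# Hedged usage sketch (not part of the original script): deduplicating shared
# initializers in a saved ONNX model; the path below is a placeholder, and the
# entry-point name follows the rewrite above.
#
#   optimized_path = remove_dup_initializers("/path/to/model.onnx")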
| 285 | 0 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'diffusion_pytorch_model.bin'
FLAX_WEIGHTS_NAME = 'diffusion_flax_model.msgpack'
ONNX_WEIGHTS_NAME = 'model.onnx'
SAFETENSORS_WEIGHTS_NAME = 'diffusion_pytorch_model.safetensors'
ONNX_EXTERNAL_WEIGHTS_NAME = 'weights.pb'
HUGGINGFACE_CO_RESOLVE_ENDPOINT = 'https://huggingface.co'
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = 'diffusers_modules'
HF_MODULES_CACHE = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
DEPRECATED_REVISION_ARGS = ['fp16', 'non-ema']
TEXT_ENCODER_ATTN_MODULE = '.self_attn'
| 350 |
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi( precision : int ) -> str:
    '''simple docstring'''
    if not isinstance(precision , int ):
        raise TypeError('''Undefined for non-integers''' )
    elif precision < 1:
        raise ValueError('''Undefined for non-natural numbers''' )
    getcontext().prec = precision
    num_iterations = ceil(precision / 1_4 )
    constant_term = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
    exponential_term = 1
    linear_term = 1_3_5_9_1_4_0_9
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 5_4_5_1_4_0_1_3_4
        exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
    n = 5_0
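    # Hedged check (not part of the original file): each Chudnovsky term
    # contributes roughly 14 digits, which is why num_iterations above is
    # ceil(precision / 14).
    assert pi(10).startswith("3.141592")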
    print(F'''The first {n} digits of pi are: {pi(n)}''')
| 222 | 0 |
"""simple docstring"""
import numpy as np
from transformers import Pipeline
def softmax( outputs ):
    maxes = np.max(outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class UpperCamelCase_ (Pipeline ):
def _SCREAMING_SNAKE_CASE ( self : Any , **lowerCAmelCase_ : int ) -> List[str]:
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
return preprocess_kwargs, {}, {}
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int]=None ) -> Dict:
return self.tokenizer(lowerCAmelCase_ , text_pair=lowerCAmelCase_ , return_tensors=self.framework )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[Any] ) -> Tuple:
return self.model(**lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]:
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits )
        best_class = np.argmax(probabilities )
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
return {"label": label, "score": score, "logits": logits}
| 268 |
"""simple docstring"""
def method_a( boundary , steps ):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a , b , h )
    y = 0.0
    y += (h / 2.0) * f(a )
    for i in x_i:
        # print(i)
        y += h * f(i )
    y += (h / 2.0) * f(b )
    return y
def make_points( a , b , h ):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f( x ):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary , steps )
    print(F"""y = {y}""" )
if __name__ == "__main__":
main()
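    # Hedged check (not part of the original file): for f(x) = x**2 on [0, 1]
    # the exact integral is 1/3, and the trapezoidal rule with 10 steps gives
    # 0.335, a slight overestimate because f is convex.
    assert abs(method_a([0.0, 1.0], 10.0) - 0.335) < 1e-9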
| 268 | 1 |
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export( model , model_args , output_path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ) -> Optional[Any]:
    '''simple docstring'''
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def convert_models( model_path : str , output_path : str , opset : int , fpaa : bool = False ) -> Any:
    '''simple docstring'''
    dtype = torch.float16 if fpaa else torch.float32
    if fpaa and torch.cuda.is_available():
        device = 'cuda'
    elif fpaa and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
    else:
        device = 'cpu'
    pipeline = StableDiffusionPipeline.from_pretrained(model_path , torch_dtype=dtype ).to(device )
    output_path = Path(output_path )
    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        'A sample prompt' , padding='max_length' , max_length=pipeline.tokenizer.model_max_length , truncation=True , return_tensors='pt' , )
    onnx_export(
        pipeline.text_encoder , model_args=(text_input.input_ids.to(device=device , dtype=torch.int32 )) , output_path=output_path / 'text_encoder' / 'model.onnx' , ordered_input_names=['input_ids'] , output_names=['last_hidden_state', 'pooler_output'] , dynamic_axes={
            'input_ids': {0: 'batch', 1: 'sequence'},
        } , opset=opset , )
    del pipeline.text_encoder
    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / 'unet' / 'model.onnx'
    onnx_export(
        pipeline.unet , model_args=(
            torch.randn(2 , unet_in_channels , unet_sample_size , unet_sample_size ).to(device=device , dtype=dtype ),
            torch.randn(2 ).to(device=device , dtype=dtype ),
            torch.randn(2 , num_tokens , text_hidden_size ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=unet_path , ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'] , output_names=['out_sample'] , dynamic_axes={
            'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
            'timestep': {0: 'batch'},
            'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
        } , opset=opset , use_external_data_format=True , )
    unet_model_path = str(unet_path.absolute().as_posix() )
    unet_dir = os.path.dirname(unet_model_path )
    unet = onnx.load(unet_model_path )
    # clean up existing tensor files
    shutil.rmtree(unet_dir )
    os.mkdir(unet_dir )
    # collate external tensor files into one
    onnx.save_model(
        unet , unet_model_path , save_as_external_data=True , all_tensors_to_one_file=True , location='weights.pb' , convert_attribute=False , )
    del pipeline.unet
    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample , return_dict : vae_encoder.encode(sample , return_dict )[0].sample()
    onnx_export(
        vae_encoder , model_args=(
            torch.randn(1 , vae_in_channels , vae_sample_size , vae_sample_size ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / 'vae_encoder' / 'model.onnx' , ordered_input_names=['sample', 'return_dict'] , output_names=['latent_sample'] , dynamic_axes={
            'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        } , opset=opset , )
    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , unet_sample_size , unet_sample_size ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        } , opset=opset , )
    del pipeline.vae
    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker , model_args=(
                torch.randn(
                    1 , clip_num_channels , clip_image_size , clip_image_size , ).to(device=device , dtype=dtype ),
                torch.randn(1 , clip_image_size , clip_image_size , clip_num_channels ).to(device=device , dtype=dtype ),
            ) , output_path=output_path / 'safety_checker' / 'model.onnx' , ordered_input_names=['clip_input', 'images'] , output_names=['out_images', 'has_nsfw_concepts'] , dynamic_axes={
                'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
                'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
            } , opset=opset , )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' )
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None
    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ) , scheduler=pipeline.scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=safety_checker is not None , )
    onnx_pipeline.save_pretrained(output_path )
    print('ONNX pipeline saved to' , output_path )
    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path , provider='CPUExecutionProvider' )
print('ONNX pipeline is loadable' )
if __name__ == "__main__":
_UpperCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
_UpperCAmelCase : List[str] = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
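# Hedged usage sketch (not part of the original script): a typical export; the
# script filename and output directory are placeholders, the flags are the ones
# defined by the argparse block above.
#
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./sd_onnx \
#       --opset 14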
| 110 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator , batch_size = 16 ) -> str:
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    datasets = load_dataset('glue' , 'mrpc' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='longest' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='pt' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size , drop_last=True )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE , drop_last=(accelerator.mixed_precision == 'fp8') , )
    return train_dataloader, eval_dataloader
def training_function( config , args ) -> Optional[Any]:
    '''simple docstring'''
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    metric = evaluate.load('glue' , 'mrpc' )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''' , eval_metric )
def main() -> Union[str, Any]:
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
        'and an Nvidia Ampere GPU.' , )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
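# Hedged usage sketch (not part of the original script): typical launches via
# the Accelerate CLI; the script filename is a placeholder, and the flag is the
# one parsed by main() above.
#
#   accelerate launch nlp_example.py                        # single GPU / CPU
#   accelerate launch nlp_example.py --mixed_precision fp16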
| 110 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_neox_japanese'''] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 104 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
lowerCAmelCase__ = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 104 | 1 |
import requests
giphy_api_key = '''YOUR API KEY'''
def get_gifs( query : str , api_key : str = giphy_api_key ):
    formatted_query = "+".join(query.split())
    url = f"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 354 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__ ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase):
'''simple docstring'''
snake_case_ =IFInpaintingSuperResolutionPipeline
snake_case_ =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
snake_case_ =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""})
snake_case_ =PipelineTesterMixin.required_optional_params - {"""latents"""}
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
return self._get_superresolution_dummy_components()
    def lowerCAmelCase__ (self ,device ,seed=0 ) -> Dict:
        """simple docstring"""
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 16, 16) ,rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 32, 32) ,rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) ,rng=random.Random(seed ) ).to(device )
lowerCAmelCase__ : Dict = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' ,reason='''float16 requires CUDA''' )
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
self._test_save_load_local()
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 ,)
| 94 | 0 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter( input_str : str = "" , ) -> bool:
    return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2
def can_string_be_rearranged_as_palindrome( input_str : str = "" ) -> bool:
    if len(input_str ) == 0:
        return True
    lower_case_input_str = input_str.replace(''' ''' , '''''' ).lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character , 0 ) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
        if odd_char > 1:
            return False
    return True
def benchmark( input_str : str = "" ) -> None:
    print('''\nFor string = ''' , input_str , ''':''' )
    print(
        '''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(input_str ) , '''\ttime =''' , timeit(
            '''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
    print(
        '''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(input_str ) , '''\ttime =''' , timeit(
            '''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
| 67 |
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 67 | 1 |
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset ):
    '''simple docstring'''
    def __init__( self , path="" , prefix="train" ):
        """simple docstring"""
        assert os.path.isdir(path )
        self.documents = []
        story_filenames_list = os.listdir(path )
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path , story_filename )
            if not os.path.isfile(path_to_story ):
                continue
            self.documents.append(path_to_story )
    def __len__( self ):
        """simple docstring"""
        return len(self.documents )
    def __getitem__( self , idx ):
        """simple docstring"""
        document_path = self.documents[idx]
        document_name = document_path.split("/" )[-1]
        with open(document_path , encoding="utf-8" ) as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story )
        return document_name, story_lines, summary_lines
def process_story( raw_story ):
    nonempty_lines = list(filter(lambda line : len(line ) != 0, [line.strip() for line in raw_story.split("\n" )] ) )
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line ) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines )
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight" ):
                break
            story_lines.append(element )
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t : not t.startswith("@highlight" ), lines ) )
    return story_lines, summary_lines
def _add_missing_period( line ):
    END_TOKENS = [".", "!", "?", "...", "'", "`", "\"", "\u2019", "\u201d", ")"]
    if line.startswith("@highlight" ):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def fit_to_block_size( sequence, block_size, pad_token_id ):
    if len(sequence ) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence )) )
        return sequence
def build_mask( sequence, pad_token_id ):
    mask = torch.ones_like(sequence )
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def encode_for_summarization( story_lines, summary_lines, tokenizer ):
    story_lines_token_ids = [tokenizer.encode(line ) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line ) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def compute_token_type_ids( batch, separator_token_id ):
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2 )
        batch_embeddings.append(embeddings )
    return torch.tensor(batch_embeddings )
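# Editor's sketch (not part of the original file; the toy story and token ids
# below are made up) showing how the helpers above fit together.
if __name__ == "__main__":
    raw = "First sentence.\nSecond sentence\n@highlight\nthe summary"
    story, summary = process_story(raw )
    assert story == ["First sentence.", "Second sentence."]  # missing period added
    assert summary == ["the summary."]
    batch = torch.tensor([[101, 7, 8, 102, 9, 102]] )
    # the token type id flips each time the separator id (here 102) is seen
    print(compute_token_type_ids(batch , separator_token_id=102 ) )  # tensor([[1, 1, 1, 0, 0, 1]])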
| 371 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mgp-str': 27}
class MgpstrTokenizer(PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , unk_token="[GO]" , bos_token="[GO]" , eos_token="[s]" , pad_token="[GO]" , **kwargs ):
        """simple docstring"""
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.vocab = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.vocab )
    def get_vocab( self ):
        """simple docstring"""
        return dict(self.vocab , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        """simple docstring"""
        char_tokens = []
        for s in text:
            char_tokens.extend(s )
        return char_tokens
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.decoder.get(index )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory ) )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        return (vocab_file,)
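# Editor's usage sketch (not part of the original file; the tiny vocab below is
# made up). MGP-STR tokenizes at the character level, so every character fed to
# the tokenizer must appear in vocab.json.
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as tmp_dir:
        tmp_vocab = os.path.join(tmp_dir , "vocab.json" )
        with open(tmp_vocab , "w" , encoding="utf-8" ) as f:
            json.dump({"[GO]": 0, "[s]": 1, "a": 2, "b": 3} , f )
        tokenizer = MgpstrTokenizer(tmp_vocab )
        print(tokenizer.tokenize("abba" ) )  # ['a', 'b', 'b', 'a'] — one token per character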
| 23 | 0 |
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50 # max width of layer names
qname_width = 70 # max width of quantizer names
def add_arguments( parser ):
    """simple docstring"""
    group = parser.add_argument_group('''quant_trainer arguments''' )
    group.add_argument('''--wprec''' , type=int , default=8 , help='''weight precision''' )
    group.add_argument('''--aprec''' , type=int , default=8 , help='''activation precision''' )
    group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' )
    group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' )
    group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' )
    group.add_argument('''--quant-disable-keyword''' , type=str , nargs='''+''' , help='''disable quantizers by keyword''' )
    group.add_argument('''--quant-disable-layer-module''' , type=str , help='''disable quantizers by keyword under layer.''' )
    group.add_argument('''--quant-enable-layer-module''' , type=str , help='''enable quantizers by keyword under layer''' )
    group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' )
    group.add_argument('''--percentile''' , default=None , type=float , help='''percentile for PercentileCalibrator''' )
    group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' )
    group.add_argument('''--clip-gelu''' , metavar='''N''' , type=float , help='''clip gelu output maximum value to N''' )
    group.add_argument(
        '''--recalibrate-weights''' , action='''store_true''' , help=(
            '''recalibrate weight amaxes by taking the max of the weights.'''
            ''' amaxes will be computed with the current quantization granularity (axis).'''
        ) , )
def set_default_quantizers( args ):
    """simple docstring"""
    if args.calibrator == "max":
        calib_method = '''max'''
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('''Specify --percentile when using percentile calibrator''' )
        calib_method = '''histogram'''
    elif args.calibrator == "mse":
        calib_method = '''histogram'''
    else:
        raise ValueError(F"Invalid calibrator {args.calibrator}" )
    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
def configure_model( model , args , calib=False , eval=False ):
    """simple docstring"""
    logger.info('''Configuring Model for Quantization''' )
    logger.info(F"using quantization package {pytorch_quantization.__file__}" )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ['''embeddings'''] , which='''weight''' , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [''''''] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
        if args.clip_gelu:
            clip_gelu(model , args.clip_gelu )
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model )
def enable_calibration( model ):
    """simple docstring"""
    logger.info('''Enabling Calibration''' )
    for name, module in model.named_modules():
        if name.endswith('''_quantizer''' ):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(F"{name:80}: {module}" )
def finish_calibration( model , args ):
    """simple docstring"""
    logger.info('''Loading calibrated amax''' )
    for name, module in model.named_modules():
        if name.endswith('''_quantizer''' ):
            if module._calibrator is not None:
                if isinstance(module._calibrator , calib.MaxCalibrator ):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax('''percentile''' , percentile=args.percentile )
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model )
def fuse_qkv( model , args ):
    """simple docstring"""
    def fusea(qq , qk , qv ):
        for mod in [qq, qk, qv]:
            if not hasattr(mod , '''_amax''' ):
                print(''' WARNING: NO AMAX BUFFER''' )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(F" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" )
    for name, mod in model.named_modules():
        if name.endswith('''.attention.self''' ):
            logger.info(F"FUSE_QKV: {name:{name_width}}" )
            fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def clip_gelu( model , maxval ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(F"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" )
def expand_amax( model ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if hasattr(mod , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
            print(F"expanding {name} {amax} -> {mod._weight_quantizer._amax}" )
def recalibrate_weights( model ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if hasattr(mod , '''_weight_quantizer''' ):
            if not hasattr(mod._weight_quantizer , '''_amax''' ):
                print(F"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(F"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" )
            mod._weight_quantizer._amax = amax
def print_model_summary( model , name_width=25 , line_width=180 , ignore=None ):
    """simple docstring"""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore , list ):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , '''weight''' ):
            continue
        name_width = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        input_q = getattr(mod , '''_input_quantizer''' , None )
        weight_q = getattr(mod , '''_weight_quantizer''' , None )
        if not hasattr(mod , '''weight''' ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = F"Act:{input_q.extra_repr()}"
        wgt_str = F"Wgt:{weight_q.extra_repr()}"
        s = F"{name:{name_width}} {act_str} {wgt_str}"
        if len(s ) <= line_width:
            logger.info(s )
        else:
            logger.info(F"{name:{name_width}} {act_str}" )
            logger.info(F"{' ':{name_width}} {wgt_str}" )
def print_quant_summary( model ):
    """simple docstring"""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
            print(F"{name:80} {mod}" )
            count += 1
    print(F"{count} TensorQuantizers found in model" )
def set_quantizer( name , mod , quantizer , k , v ):
    """simple docstring"""
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(F"{name} has no {quantizer}" )
def set_quantizers( name , mod , which="both" , **kwargs ):
    """simple docstring"""
    s = F"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += F" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name , mod , '''_input_quantizer''' , k , v )
        if which in ["weight", "both"]:
            set_quantizer(name , mod , '''_weight_quantizer''' , k , v )
    logger.info(s )
def set_quantizer_by_name( model , names , **kwargs ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if hasattr(mod , '''_input_quantizer''' ) or hasattr(mod , '''_weight_quantizer''' ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith('''_quantizer''' ):
            for n in names:
                if re.search(n , name ):
                    s = F"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += F" {k}={v}"
                        setattr(mod , k , v )
                    logger.info(s )
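# Editor's illustration (standalone sketch, not part of the original script): the
# heart of fuse_qkv above is giving the Q, K and V quantizers one shared range by
# taking the max of their calibrated amax values. Reduced to plain tensors:
def _fused_amax_demo():
    q_amax, k_amax, v_amax = torch.tensor(1.7 ), torch.tensor(2.3 ), torch.tensor(0.9 )
    fused = max(q_amax.item() , k_amax.item() , v_amax.item() )
    for t in (q_amax, k_amax, v_amax):
        t.fill_(fused )  # all three quantizers now share the widest range, 2.3
    return fused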
| 104 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (('''num_inference_steps''', 50),)
    def get_scheduler_config( self , **kwargs ):
        config = {"""num_train_timesteps""": 1000}
        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **kwargs ):
_a : Optional[int] = dict(self.forward_default_kwargs )
_a : Dict = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ )
_a : Optional[Any] = self.dummy_sample
_a : Union[str, Any] = 0.1 * sample
_a : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
_a : Optional[int] = self.get_scheduler_config(**UpperCAmelCase__ )
_a : Union[str, Any] = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals
_a : Any = dummy_past_residuals[:]
if time_step is None:
_a : str = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase__ )
_a : Union[str, Any] = scheduler_class.from_pretrained(UpperCAmelCase__ )
new_scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals
_a : Optional[Any] = dummy_past_residuals[:]
_a : List[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : str = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_a : Optional[int] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : Tuple = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _lowercase ( self : Tuple ) -> List[str]:
pass
    def check_over_forward( self , time_step=0 , **kwargs ):
_a : Optional[Any] = dict(self.forward_default_kwargs )
_a : Optional[Any] = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ )
_a : Optional[Any] = self.dummy_sample
_a : List[Any] = 0.1 * sample
_a : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
_a : Union[str, Any] = self.get_scheduler_config()
_a : Optional[Any] = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
_a : Any = dummy_past_residuals[:]
if time_step is None:
_a : List[Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase__ )
_a : Any = scheduler_class.from_pretrained(UpperCAmelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residual (must be after setting timesteps)
_a : Optional[Any] = dummy_past_residuals[:]
_a : List[str] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : Tuple = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_a : Union[str, Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : int = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self , **config ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
def _lowercase ( self : int ) -> str:
_a : Dict = dict(self.forward_default_kwargs )
_a : int = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ )
for scheduler_class in self.scheduler_classes:
_a : Optional[int] = self.get_scheduler_config()
_a : Tuple = scheduler_class(**UpperCAmelCase__ )
_a : Tuple = self.dummy_sample
_a : Optional[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCAmelCase__ , """set_timesteps""" ):
scheduler.set_timesteps(UpperCAmelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCAmelCase__ , """set_timesteps""" ):
_a : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_a : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
_a : Optional[Any] = dummy_past_residuals[:]
_a : Optional[Any] = scheduler.timesteps[5]
_a : str = scheduler.timesteps[6]
_a : Optional[int] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : Union[str, Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_a : Tuple = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : List[str] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
    def test_timesteps( self ):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps , time_step=None )
    def test_inference_steps( self ):
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=None )
    def test_full_loop_no_noise( self ):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 2540529 ) < 10
| 294 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def read_txt_into_dict( filename ):
    result = {}
    with open(filename , """r""" ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
for attribute in key.split(""".""" ):
A__ = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(UpperCAmelCase_ ):
A__ = PARAM_MAPPING[full_name.split(""".""" )[-1]]
A__ = 'param'
if weight_type is not None and weight_type != "param":
A__ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ).shape
elif weight_type is not None and weight_type == "param":
A__ = hf_pointer
for attribute in hf_param_name.split(""".""" ):
A__ = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = shape_pointer.shape
# let's reduce dimension
A__ = value[0]
else:
A__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
A__ = value
elif weight_type == "weight_g":
A__ = value
elif weight_type == "weight_v":
A__ = value
elif weight_type == "bias":
A__ = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
A__ = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = value
else:
A__ = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def rename_dict( key , value , full_name , weight_type , hf_dict ):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split(""".""" )[-1]]
            weight_type = 'param'
    if weight_type is not None and weight_type != "param":
        full_key = '.'.join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        full_key = '.'.join([key, hf_param_name] )
    else:
        full_key = key
    hf_dict[full_key] = value if 'lm_head' in full_key else value[0]
PARAM_MAPPING = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def load_wavaveca_layer( name , value , hf_model=None , hf_dict=None ):
A__ = False
for key, mapped_key in MAPPING.items():
A__ = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
A__ = True
if "*" in mapped_key:
A__ = name.split(UpperCAmelCase_ )[0].split(""".""" )[-2]
A__ = mapped_key.replace("""*""" , UpperCAmelCase_ )
if "weight_g" in name:
A__ = 'weight_g'
elif "weight_v" in name:
A__ = 'weight_v'
elif "bias" in name:
A__ = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A__ = 'weight'
else:
A__ = None
if hf_dict is not None:
rename_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
else:
set_recursively(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return is_used
return is_used
def recursively_load_weights( fairseq_model , hf_model , is_headless ):
A__ = []
A__ = fairseq_model.state_dict()
A__ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
A__ = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , hf_model.config.feat_extract_norm == """group""" , )
A__ = True
else:
A__ = load_wavaveca_layer(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if not is_used:
unused_weights.append(UpperCAmelCase_ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
A__ = full_name.split("""conv_layers.""" )[-1]
A__ = name.split(""".""" )
A__ = int(items[0] )
A__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
A__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
A__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
A__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
A__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCAmelCase_ )
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True , is_seq_class=False ):
if config_path is not None:
A__ = WavaVecaConfig.from_pretrained(UpperCAmelCase_ )
else:
A__ = WavaVecaConfig()
if is_seq_class:
A__ = read_txt_into_dict(UpperCAmelCase_ )
A__ = idalabel
A__ = WavaVecaForSequenceClassification(UpperCAmelCase_ )
A__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , )
feature_extractor.save_pretrained(UpperCAmelCase_ )
elif is_finetuned:
if dict_path:
A__ = Dictionary.load(UpperCAmelCase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
A__ = target_dict.pad_index
A__ = target_dict.bos_index
A__ = target_dict.eos_index
A__ = len(target_dict.symbols )
A__ = os.path.join(UpperCAmelCase_ , """vocab.json""" )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(UpperCAmelCase_ ) )
return
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
A__ = target_dict.indices
# fairseq has the <pad> and <s> switched
A__ = 0
A__ = 1
with open(UpperCAmelCase_ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = WavaVecaCTCTokenizer(
UpperCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=UpperCAmelCase_ , )
A__ = True if config.feat_extract_norm == 'layer' else False
A__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , )
A__ = WavaVecaProcessor(feature_extractor=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ )
processor.save_pretrained(UpperCAmelCase_ )
A__ = WavaVecaForCTC(UpperCAmelCase_ )
else:
A__ = WavaVecaForPreTraining(UpperCAmelCase_ )
if is_finetuned or is_seq_class:
A__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
A__ = argparse.Namespace(task="""audio_pretraining""" )
A__ = fairseq.tasks.setup_task(UpperCAmelCase_ )
A__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCAmelCase_ )
A__ = model[0].eval()
recursively_load_weights(UpperCAmelCase_ , UpperCAmelCase_ , not is_finetuned )
hf_wavavec.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 356 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_nllb_moe': [
        'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'NllbMoeConfig',
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_nllb_moe'] = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 69 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    '''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_mobilevit'''] = ['''MobileViTFeatureExtractor''']
    _import_structure['''image_processing_mobilevit'''] = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mobilevit'''] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mobilevit'''] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 348 |
import qiskit
def half_adder( bit0 , bit1 )-> qiskit.result.counts.Counts:
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0 )
    if bit1 == 1:
        qc_ha.x(1 )
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0 , 2 )
    qc_ha.cx(1 , 2 )
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0 , 1 , 3 )
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2 , 0 ) # extract XOR value
    qc_ha.measure(3 , 1 ) # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha , simulator , shots=10_00 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(f'Half Adder Output Qubit Counts: {counts}')
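    # Editor's cross-check (classical, not in the original file): the circuit above
    # computes XOR for the sum bit and AND for the carry bit, i.e. the half-adder
    # truth table.
    for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]:
        assert (a ^ b, a & b) == ((a + b) % 2, (a + b) // 2)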
| 348 | 1 |
from __future__ import annotations
import requests
valid_terms = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def get_subreddit_data( subreddit , limit = 1 , age = "new" , wanted_data = None ):
    '''simple docstring'''
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data ) - valid_terms ) ):
        msg = f"""Invalid search term: {invalid_search_terms}"""
        raise ValueError(msg )
    response = requests.get(
        f"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={'''User-agent''': '''A random string'''} , )
    if response.status_code == 4_29:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit )}
    data_dict = {}
    for id_ in range(limit ):
        data_dict[id_] = {
            item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
| 370 |
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input('Enter image url: ').strip()
    print(f'Downloading image from {url} ...')
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    file_name = f'{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
print(f'Done. Image saved to disk as {file_name}.')
| 75 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency( inductance: float , capacitance: float ) -> tuple:
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
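    # Editor's worked example (values are illustrative): with L = 10 mH and
    # C = 1 uF, f = 1 / (2*pi*sqrt(L*C)) = 1 / (2*pi*1e-4) ≈ 1591.55 Hz.
    assert abs(resonant_frequency(10e-3 , 1e-6 )[1] - 1591.549 ) < 0.01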
| 250 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_TRANSFORMERS = 'src/transformers'
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'\[([^\]]+)\]')
def get_indent( line ) -> str:
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks( code , indent_level="" , start_prompt=None , end_prompt=None ):
_lowercase : List[str] = 0
_lowercase : str = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(snake_case ):
index += 1
_lowercase : Optional[int] = ["\n".join(lines[:index] )]
else:
_lowercase : Dict = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowercase : Any = [lines[index]]
index += 1
while index < len(snake_case ) and (end_prompt is None or not lines[index].startswith(snake_case )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(snake_case ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(snake_case ) )
if index < len(snake_case ) - 1:
_lowercase : int = [lines[index + 1]]
index += 1
else:
_lowercase : Optional[int] = []
else:
blocks.append("\n".join(snake_case ) )
_lowercase : Optional[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(snake_case ) > 0:
blocks.append("\n".join(snake_case ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(snake_case ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def ignore_underscore( key ):
    def _inner(x ):
        return key(x ).lower().replace("_" , "" )
    return _inner
def sort_objects( objects , key=None ):
    # If no key is provided, we use a noop.
    def noop(x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
def sort_objects_in_import( import_statement ):
# This inner function sort imports between [ ].
def _replace(snake_case ):
_lowercase : Optional[Any] = match.groups()[0]
if "," not in imports:
return F'''[{imports}]'''
_lowercase : str = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowercase : Optional[int] = keys[:-1]
return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(snake_case )] ) + "]"
_lowercase : Tuple = import_statement.split("\n" )
if len(snake_case ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowercase : Union[str, Any] = 2 if lines[1].strip() == "[" else 1
_lowercase : Optional[int] = [(i, _re_strip_line.search(snake_case ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_lowercase : Any = sort_objects(snake_case , key=lambda snake_case : x[1] )
_lowercase : Tuple = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(snake_case ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowercase : Dict = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowercase : Optional[Any] = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowercase : Optional[int] = keys[:-1]
_lowercase : Optional[Any] = get_indent(lines[1] ) + ", ".join([F'''"{k}"''' for k in sort_objects(snake_case )] )
return "\n".join(snake_case )
else:
# Finally we have to deal with imports fitting on one line
_lowercase : Optional[Any] = _re_bracket_content.sub(_replace , snake_case )
return import_statement
def sort_imports( file , check_only=True ):
with open(snake_case , encoding="utf-8" ) as f:
_lowercase : Dict = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowercase : Optional[Any] = split_code_in_indented_blocks(
snake_case , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(snake_case ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowercase : Dict = main_blocks[block_idx]
_lowercase : Union[str, Any] = block.split("\n" )
# Get to the start of the imports.
_lowercase : int = 0
while line_idx < len(snake_case ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowercase : Optional[Any] = len(snake_case )
else:
line_idx += 1
if line_idx >= len(snake_case ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowercase : Any = "\n".join(block_lines[line_idx:-1] )
_lowercase : int = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
_lowercase : Optional[int] = split_code_in_indented_blocks(snake_case , indent_level=snake_case )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowercase : Union[str, Any] = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowercase : str = [(pattern.search(snake_case ).groups()[0] if pattern.search(snake_case ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowercase : List[str] = [(i, key) for i, key in enumerate(snake_case ) if key is not None]
_lowercase : Tuple = [x[0] for x in sorted(snake_case , key=lambda snake_case : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_lowercase : Any = 0
_lowercase : str = []
for i in range(len(snake_case ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
_lowercase : Tuple = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(snake_case )
count += 1
# And we put our main block back together with its first and last line.
_lowercase : int = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(snake_case ):
if check_only:
return True
else:
print(F'''Overwriting {file}.''' )
with open(snake_case , "w" , encoding="utf-8" ) as f:
f.write("\n".join(snake_case ) )
def sort_imports_in_all_inits( check_only=True ):
_lowercase : List[Any] = []
for root, _, files in os.walk(snake_case ):
if "__init__.py" in files:
_lowercase : Tuple = sort_imports(os.path.join(snake_case , "__init__.py" ) , check_only=snake_case )
if result:
_lowercase : Any = [os.path.join(snake_case , "__init__.py" )]
if len(snake_case ) > 0:
raise ValueError(F'''Would overwrite {len(snake_case )} files, run `make style`.''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
_snake_case = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
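    # Editor's illustration (example names are made up): the ordering rule that
    # sort_objects implements — constants first, then classes, then functions,
    # each group sorted case-insensitively while ignoring underscores.
    assert sort_objects(["load_tf_weights", "BertModel", "BERT_PRETRAINED_MAP", "AutoModel"] ) == [
        "BERT_PRETRAINED_MAP", "AutoModel", "BertModel", "load_tf_weights"
    ]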
| 250 | 1 |
def pancake_sort( arr ) -> list:
    """simple docstring"""
    cur = len(arr )
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
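    # Editor's trace (illustrative): pancake_sort([3, 1, 2]) — the max 3 sits at
    # index 0, so the first flip is a no-op; reversing the first cur=3 elements
    # gives [2, 1, 3] and fixes 3 at the end; the next pass yields [1, 2, 3].
    assert pancake_sort([3, 1, 2] ) == [1, 2, 3]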
| 44 |
alphabet_size = 256
# Modulus to hash a string
modulus = 100_0003
def rabin_karp( pattern , text ) -> bool:
    """simple docstring"""
    p_len = len(pattern )
    t_len = len(text )
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len ):
        p_hash = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i] ) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0 , t_len - p_len + 1 ):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i] ) * modulus_power) * alphabet_size
            + ord(text[i + p_len] )
        ) % modulus
    return False
def test_rabin_karp() -> None:
    """simple docstring"""
    # Test 1)
    pattern = '''abc1abc12'''
    text1 = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    text2 = '''alskfjaldsk23adsfabcabc'''
    assert rabin_karp(pattern , text1 ) and not rabin_karp(pattern , text2 )
    # Test 2)
    pattern = '''ABABX'''
    text = '''ABABZABABYABABX'''
    assert rabin_karp(pattern , text )
    # Test 3)
    pattern = '''AAAB'''
    text = '''ABAAAAAB'''
    assert rabin_karp(pattern , text )
    # Test 4)
    pattern = '''abcdabcy'''
    text = '''abcxabcdabxabcdabcdabcy'''
    assert rabin_karp(pattern , text )
    # Test 5)
    pattern = '''Lü'''
    text = '''Lüsai'''
    assert rabin_karp(pattern , text )
    pattern = '''Lue'''
    assert not rabin_karp(pattern , text )
    print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
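    # Editor's worked example of the rolling-hash update (tiny constants, not the
    # module's): with base 10 and modulus 97, sliding the window "23" -> "34"
    # over "234" reuses the old hash instead of rehashing from scratch.
    _base, _mod = 10, 97
    _h = (2 * _base + 3) % _mod                      # hash("23") = 23
    _h = ((_h - 2 * _base) * _base + 4) % _mod       # drop the 2, append the 4
    assert _h == (3 * _base + 4) % _mod              # equals hash("34") = 34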
| 44 | 1 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func , a , precision = 10**-10 ) -> float:
    '''simple docstring'''
    x = a
    while True:
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) ) # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision: # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
print(F"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
# Find Square Root of 5
print(F"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
# Exponential Roots
print(F"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
| 136 |
"""simple docstring"""
from typing import List
import numpy as np
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = {key: len(__lowerCAmelCase ) for key, value in gen_kwargs.items() if isinstance(__lowerCAmelCase , __lowerCAmelCase )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
"""Sharding is ambiguous for this dataset: """
+ """we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"""
+ """\n""".join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
+ """\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, """
+ """and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."""
) )
lowercase_ = max(lists_lengths.values() , default=0 )
return max(1 , __lowerCAmelCase )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> List[range]:
'''simple docstring'''
lowercase_ = []
for group_idx in range(__lowerCAmelCase ):
lowercase_ = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
lowercase_ = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
lowercase_ = range(__lowerCAmelCase , start + num_shards_to_add )
shards_indices_per_group.append(__lowerCAmelCase )
return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split gen_kwargs into ``max_num_jobs`` dicts, distributing list shards across them."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Concatenate the list values of several gen_kwargs dicts back into one."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a shuffled copy of the input gen_kwargs."""
    # We must shuffle all the lists, and lists of the same size must use the same shuffling
    # so that entangled lists (e.g. shards and their metadata) stay aligned.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
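# Exposition-only round-trip example (not in the original file): splitting
# gen_kwargs into jobs and merging them back should preserve every list, while
# non-list values are shared across jobs unchanged.
if __name__ == "__main__":
    demo_kwargs = {"filepaths": ["a", "b", "c", "d"], "split": "train"}
    jobs = _split_gen_kwargs(demo_kwargs, max_num_jobs=2)
    assert jobs == [
        {"filepaths": ["a", "b"], "split": "train"},
        {"filepaths": ["c", "d"], "split": "train"},
    ]
    assert _merge_gen_kwargs(jobs) == demo_kwargs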
| 136 | 1 |
"""simple docstring"""
def max_product_subarray(numbers: list[int]) -> int:
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
| 239 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for BertGeneration models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 239 | 1 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    """Test cases for the 0/1 knapsack implementation."""

    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
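# Sanity check outside the TestCase (added for illustration; mirrors test_knapsack
# above): the optimal picks are the items weighing 20 and 30, worth 100 + 120 = 220.
assert k.knapsack(50, [10, 20, 30], [60, 100, 120], 3) == 220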
if __name__ == "__main__":
unittest.main()
| 115 |
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
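# Exposition-only wiring sketch (not part of the original script): "train.jsonl",
# the tokenizer checkpoint, and max_seq_length here are placeholder assumptions.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    dataset = JsonlDataset(
        "train.jsonl", tokenizer, get_image_transforms(), get_mmimdb_labels(), max_seq_length=510
    )
    loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
    text, mask, img, img_start, img_end, tgt = next(iter(loader))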
| 115 | 1 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
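# Example invocation (illustrative; the tokenizer name and directories are
# placeholders, not values mandated by the script):
#
#   python pack_dataset.py --tok_name t5-base --max_seq_len 512 \
#       --data_dir cnn_dm --save_path cnn_dm_packed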
| 353 |
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """Wraps two `Transformer2DModel`s and mixes their outputs for dual text-condition inference."""

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
| 247 | 0 |
"""simple docstring"""
import os
def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
| 98 |
"""simple docstring"""
def binary_and(a: int, b: int):
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
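# Worked examples (added for illustration): 25 = 0b11001 and 32 = 0b100000 share
# no set bits, while 5 = 0b101 and 3 = 0b011 share only the lowest bit.
assert binary_and(25, 32) == "0b000000"
assert binary_and(5, 3) == "0b001"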
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 1 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        """A do-nothing stand-in so this file can be imported without the vision extras."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 350 |
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 152 | 0 |