| column | type | range |
|---|---|---|
| code | string | lengths 87 to 55.2k |
| code_codestyle | int64 | 0 to 349 |
| style_context | string | lengths 135 to 49.1k |
| style_context_codestyle | int64 | 0 to 349 |
| label | int64 | 0 to 1 |
def solution(n: int = 1000) -> int:
    """
    Return the largest product a * b * c over all Pythagorean triplets (a, b, c)
    with a + b + c == n. Project Euler problem 9 asks for n == 1000.
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2 + b**2 = c**2 and a + b + c = n, eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
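
# Quick sanity check (illustrative): for n = 1000 the only Pythagorean triplet
# is (200, 375, 425), since 200**2 + 375**2 = 180625 = 425**2 and the sides sum
# to 1000, so solution() must return 200 * 375 * 425 == 31875000.
assert solution(1000) == 200 * 375 * 425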
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen can be placed at board[row][column]."""
    # Check the current row to the left of this square.
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    # Check the current column above this square.
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # Check the upper-left diagonal.
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # Check the upper-right diagonal.
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, recording and printing every complete solution."""
    if row >= len(board):
        # Store a copy: `board` itself is mutated again while backtracking.
        solution.append([line.copy() for line in board])
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Print the board with 'Q' for a queen and '.' for an empty square."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
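
# Reference check (illustrative): the classic 8-queens puzzle is known to have
# 92 distinct solutions, so for n = 8 the run above should end with a count of 92.
assert len(solution) == 92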
"""Convert WavLM checkpoint."""

import argparse

import torch

# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./  # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig

from transformers import WavLMConfig, WavLMModel, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Maps fairseq parameter-name fragments to their transformers counterparts.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy one fairseq tensor onto the matching attribute of the HF model, checking shapes."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    """Walk the fairseq state dict and map every tensor onto the HF model."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one convolutional feature-extractor tensor, checking shapes first."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Load the original WavLM checkpoint and copy its weights into the transformers model."""
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
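
# Example invocation (a sketch: the script and file names below are placeholders,
# not paths confirmed by this file):
#
#   python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted
#
# The resulting folder can then be loaded with WavLMModel.from_pretrained().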
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
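
# With the lazy module in place, callers import from the package as usual and the
# heavy submodules only load on first attribute access, e.g. (usage sketch):
#
#   from transformers import CTRLConfig, CTRLModel  # triggers modeling_ctrl lazily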
def binary_recursive(decimal: int) -> str:
    """Return the binary representation of a non-negative integer, recursively."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input, delegate to binary_recursive() and add the '0b' prefix."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
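
# A few expected values, easily checked against Python's built-in bin():
#
#   main("11")  -> "0b1011"
#   main("-11") -> "-0b1011"
#   main("0")   -> "0b0"
assert main("11") == bin(11)  # both give "0b1011"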
from collections.abc import Callable


class Heap:
    """A generic heap supporting updates and deletions; pass a key function to control ordering."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs the changes required for swapping two elements in the heap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using the default '<' operator."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns the index that should be on top among the given index and its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction from the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction from the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates the value of the given item, if it is present in the heap."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes the given item from the heap, if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change - so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        """Returns the top item pair [item, scored value] if the heap is non-empty."""
        return self.arr[0] if self.size else None

    def extract_top(self):
        """Returns the top item pair and removes it from the heap as well, if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    """Doctest placeholder; the original module-level doctests exercise the Heap class."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
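
# Minimal usage sketch: with the default key the structure behaves as a max-heap
# on the item value; pass key=lambda x: -x to get min-heap ordering instead.
h = Heap()
h.insert_item(5, 34)
h.insert_item(6, 31)
h.insert_item(7, 37)
print(h.get_top())      # [7, 37], the largest value is on top
print(h.extract_top())  # [7, 37], and the item is removed
print(h.get_top())      # [5, 34]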
import unittest

from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DebertaForMaskedLM,
        DebertaForQuestionAnswering,
        DebertaForSequenceClassification,
        DebertaForTokenClassification,
        DebertaModel,
    )
    from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST


class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
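
# These tests follow the standard transformers test layout and are usually run
# with pytest from the repository root, e.g. (illustrative invocation):
#
#   pytest tests/models/deberta/test_modeling_deberta.py -k "deberta_model"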
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines: a list of video frames."""

    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
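
# Usage sketch (assuming the publicly released damo-vilab checkpoint name below):
#
#   from diffusers import TextToVideoSDPipeline
#
#   pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
#   frames = pipe("an astronaut riding a horse").frames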
import json
import os
import unittest

from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self) -> None:
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs) -> DebertaTokenizer:
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
                "input_ids": [
                    [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
                ],
                "token_type_ids": [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                "attention_mask": [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on

            expected_decoded_sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
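
# Usage sketch (downloads the 3B checkpoint's tokenizer files):
#
#   from transformers import BlenderbotTokenizerFast
#
#   tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   tok("Hello world")["input_ids"]
#
# Note that build_inputs_with_special_tokens above only appends </s>; Blenderbot
# does not wrap inputs with a leading <s> the way RoBERTa-style tokenizers do.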
import inspect
import unittest

import numpy as np

from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor


if is_flax_available():
    import jax

    from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel

if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor


class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values, labels

    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    # We need to override this test because BEiT's forward signature differs from text models.
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    # We need to override this test because BEiT expects pixel_values instead of input_ids.
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        # Translate the old "--no_xxx" flags into a helpful error message.
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
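
# Example run (a sketch; the flags come from TensorFlowBenchmarkArguments and the
# values here are purely illustrative):
#
#   python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128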
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
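
# Instantiation sketch: every argument has a default, so the classic GPT-1
# architecture comes out of the box; any field can be overridden as a keyword.
#
#   from transformers import OpenAIGPTConfig, OpenAIGPTModel
#
#   config = OpenAIGPTConfig(n_layer=6)
#   model = OpenAIGPTModel(config)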
def bfs(graph, source, sink, parent):
    """Return True if an augmenting path from source to sink exists, filling `parent` on the way."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from source to sink, using BFS to find augmenting paths (Edmonds-Karp)."""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path.
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
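
# For reference, this capacity matrix is the classic CLRS flow-network example,
# whose maximum s-t flow is 23, so the print above outputs 23. Note that
# ford_fulkerson mutates `graph` into its residual network, so running it a
# second time on the same matrix would return 0.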
| 337 | 0 |
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level():
    """Return the level set via TRANSFORMERS_VERBOSITY, falling back to the module default."""
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split(".")[0]
def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    return log_levels
def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, defaulting to the library root logger."""
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)
def get_verbosity() -> int:
    """Return the current verbosity level of the library root logger."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level of the library root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    return set_verbosity(INFO)
def set_verbosity_warning():
    return set_verbosity(WARNING)
def set_verbosity_debug():
    return set_verbosity(DEBUG)
def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    """Disable the default handler of the library root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)
def enable_default_handler() -> None:
    """Enable the default handler of the library root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)
def add_handler(handler: logging.Handler) -> None:
    """Add a handler to the library root logger."""
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)
def remove_handler(handler: logging.Handler) -> None:
    """Remove the given handler from the library root logger."""
    _configure_library_root_logger()
    assert handler is not None and handler in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
    """Disable propagation of the library log outputs (the default)."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format() -> None:
    """Apply an explicit `[LEVEL|FILE:LINE] TIME >> MESSAGE` formatter to every handler."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)
def reset_format() -> None:
    """Reset the formatting of every handler of the library root logger."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    """Same as warning(), but silenced when TRANSFORMERS_NO_ADVISORY_WARNINGS is set."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Same as warning(), but each distinct message is only emitted once per process."""
    self.warning(*args, **kwargs)
logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which does nothing when progress bars are disabled."""
    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
    def __iter__(self):
        return iter(self._iterator)
    def __getattr__(self, _):
        """Return an empty function whatever attribute is requested."""
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__(self):
        return self
    def __exit__(self, type_, value, traceback):
        return
class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)
    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)
    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)
def enable_progress_bars() -> None:
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bars() -> None:
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
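# A minimal usage sketch for the utilities above; the import path is hypothetical
# for this standalone copy (in transformers this module ships as transformers.utils.logging):
#   from transformers.utils import logging as hf_logging
#   hf_logging.set_verbosity_info()
#   logger = hf_logging.get_logger(__name__)
#   logger.info("effective level: %s", hf_logging.get_verbosity())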
| 108 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """Decorator that registers a single key with the decorated handler."""
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func
    return decorator
def mark_multiple(*keys):
    """Decorator that registers several keys with the decorated handler."""
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func
    return decorator
class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input(cls):
        """Read one character and dispatch to the registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register(cls):
    """Rebuild ``cls`` through the KeyHandler metaclass so its marked handlers are wired up."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
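# A minimal usage sketch (the Menu class is hypothetical; assumes KEYMAP defines
# an "up" entry and that stdin is an interactive terminal when handle_input runs):
#   class Menu:
#       @mark(KEYMAP["up"])
#       def move_up(cls):
#           print("moved up")
#   Menu = register(Menu)
#   Menu().handle_input()  # dispatches to move_up when the up key is pressed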
| 337 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 109 |
import logging
import os
from .state import PartialState
class __SCREAMING_SNAKE_CASE ( logging.LoggerAdapter ):
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ ):
lowercase : List[Any] = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
lowercase : List[str] = kwargs.pop('''main_process_only''' , SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = kwargs.pop('''in_order''' , SCREAMING_SNAKE_CASE__ )
if self.isEnabledFor(SCREAMING_SNAKE_CASE__ ):
if self._should_log(SCREAMING_SNAKE_CASE__ ):
lowercase , lowercase : str = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
elif in_order:
lowercase : List[Any] = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
lowercase , lowercase : Union[str, Any] = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
state.wait_for_everyone()
def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->List[Any]:
"""simple docstring"""
if log_level is None:
lowercase : str = os.environ.get('''ACCELERATE_LOG_LEVEL''', _UpperCamelCase )
lowercase : str = logging.getLogger(_UpperCamelCase )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(_UpperCamelCase, {} )
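# A minimal usage sketch (assumes an Accelerator/PartialState has been created
# first, otherwise MultiProcessAdapter.log raises the RuntimeError above):
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("visible on the main process only")
#   logger.info("visible everywhere", main_process_only=False)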
| 337 | 0 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, prediction_length=self.prediction_length, context_length=self.context_length, label_length=self.label_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_categorical_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], moving_average=self.moving_average, )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)
        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])
        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)
        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1, )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device, )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)
        last_hidden_state_2 = decoder(
            trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state, )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass
    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")
            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ])
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )
            out_len = len(outputs)
            correct_outlen = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(out_len, correct_outlen)
            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )
            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )
        # Check attention is always last and order is fine
        inputs_dict["output_attentions"] = True
        inputs_dict["output_hidden_states"] = True
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        self.assertEqual(out_len + 2, len(outputs))
        self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
        self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
        self.assertListEqual(
            list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    """Download a prepared batch from the test dataset hub repo and load it on the test device."""
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], future_values=batch["future_values"], future_time_features=batch["future_time_features"], )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"], past_time_features=batch["past_time_features"], past_values=batch["past_values"], future_time_features=batch["future_time_features"], past_observed_mask=batch["past_observed_mask"], )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 120 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)
    # implement only because Lightning requires a forward method
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path):
    """Transfer QA head weights from a PyTorch Lightning checkpoint into a transformers model."""
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
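# Example invocation (the script name and all paths are placeholders):
#   python convert_longformer_qa_checkpoint.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./qa_checkpoint.ckpt \
#       --pytorch_dump_folder_path ./longformer_qa_converted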
| 337 | 0 |
"""simple docstring"""
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place with the bidirectional bubble (cocktail shaker) sort."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
| 135 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    @property
    def default_onnx_opset(self) -> int:
        return 12
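# Usage sketch (inside a transformers checkout, where the relative imports resolve;
# the asserted values simply restate the defaults declared above):
#   config = YolosConfig()
#   assert config.model_type == "yolos"
#   assert config.num_detection_tokens == 100 and config.hidden_size == 768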
| 337 | 0 |
"""simple docstring"""
def solution(n = 100):
    """Difference between the square of the sum and the sum of the squares of the first n naturals."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'{solution() = }')
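    # Known results for Project Euler problem 6 (2640 for n=10, 25164150 for n=100):
    assert solution(10) == 2640
    assert solution() == 25164150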
| 264 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}.")
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}")
def require_version(requirement, hint: Optional[str] = None) -> None:
    """Verify that a pip-style requirement (e.g. ``package>=1.23``) is satisfied at runtime."""
    hint = f"\n{hint}" if hint is not None else ""
    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}")
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}")
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")
    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}")
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
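# Usage sketch: these mirror how transformers guards its dependencies at import
# time (the first call only checks that the package is installed at all):
#   require_version("tqdm")
#   require_version("numpy>=1.17,<2.0")
#   require_version_core("python>=3.7")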
| 337 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Copy/paste/tweak a fairseq UniSpeech checkpoint's weights into the transformers design."""
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_unispeech, is_finetuned)
    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
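# Example invocation for a pretrained (not fine-tuned) checkpoint; the script
# name and all paths are placeholders:
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path ./unispeech.pt \
#       --pytorch_dump_folder_path ./converted \
#       --not_finetuned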
| 14 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size(self) -> int:
        return self.d_model
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
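# Usage sketch (inside a transformers checkout, where the relative imports and
# the ResNet entry of CONFIG_MAPPING resolve; asserted values restate the defaults):
#   config = DetaConfig()
#   assert config.hidden_size == 256 and config.two_stage is True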
| 337 | 0 |
"""simple docstring"""
def generate_large_matrix() -> list:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list) -> int:
    """Find the smallest index of a negative number in a descending sorted array."""
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list) -> int:
    """Count the negative numbers using a shrinking binary-search bound per row."""
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list) -> int:
    """Count the negative numbers by checking every value."""
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list) -> int:
    """Count negatives row by row, breaking at the first negative in each row."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    """Benchmark the three counting functions against the large grid."""
    from timeit import timeit
    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
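    # Agreement check on the first sample grid (LeetCode 1351; it contains 8 negatives):
    sample = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
    assert (
        count_negatives_binary_search(sample)
        == count_negatives_brute_force(sample)
        == count_negatives_brute_force_with_break(sample)
        == 8
    )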
| 263 |
def solution() -> str:
    """Return the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
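    # Known result for Project Euler problem 48:
    assert solution() == "9110846700"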
| 337 | 0 |
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursive 0/1 knapsack: best value achievable from `index` onwards within `max_weight`."""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1)
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
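# Usage sketch (added): with weights [1, 2, 4, 5], values [5, 4, 8, 6] and a
# capacity of 5, the best choice is items 0 and 2, worth 5 + 8 = 13.
assert knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0) == 13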
# ---------------------------------------------------------------------------
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """    \"""
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample (x_{0}) based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    \"""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
"""
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        code = black.format_str(code, mode=black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119))
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("DDPM", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
# ---------------------------------------------------------------------------
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
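# For illustration (added; this is not transformers' actual _LazyModule, just
# the core idea): a module subclass whose attribute lookup triggers the real
# submodule import on first use.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each public name to the submodule that defines it.
        self._class_to_module = {cls: mod for mod, classes in import_structure.items() for cls in classes}

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)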
# ---------------------------------------------------------------------------
import math


class Graph:
    def __init__(self, n=0):  # a graph with nodes 0, 1, ..., n-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))  # 11, via 1 -> 3 -> 4
    print(graph.show_min(0, 3))  # 16, via 0 -> 2 -> 3
# ---------------------------------------------------------------------------
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    """Approximate the area between fnc and the x axis with the trapezoidal rule."""
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next)
        area += abs(fxa_next + fxa) * (xa_next - xa) / 2
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return area


if __name__ == "__main__":

    def f(x: float) -> float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
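# Quick check (added): the area under x**2 on [0, 3] is exactly 9, and the
# trapezoidal estimate converges to it as the step count grows.
assert abs(trapezoidal_area(lambda x: x**2, 0, 3, 100_000) - 9) < 1e-3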
# ---------------------------------------------------------------------------
from __future__ import annotations


def mean(nums: list) -> float:
    """
    Find the mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5, 10, 15, 20, 25, 30, 35])
    20.0
    >>> mean([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# ---------------------------------------------------------------------------
"""Fine-tune TAPEX (a BART model) for table-based fact verification on TabFact."""

import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional

import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    BartForSequenceClassification,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    TapexTokenizer,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    label_to_id = {"Refused": 0, "Entailed": 1}
    id_to_label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            """Rows are separated by newlines, cells by '#'; the first row is the header."""
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
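# Added illustration: the '#'-separated table format this script expects,
# flattened into a pandas DataFrame the same way preprocess_tabfact_function
# does it (the first row becomes the header).
def _demo_table() -> pd.DataFrame:
    table_text = "city#population\nparis#2.1m\nberlin#3.6m"
    rows = [r.split("#") for r in table_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])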
# ---------------------------------------------------------------------------
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
# ---------------------------------------------------------------------------
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
_A = TypeVar("""_T""")
class lowerCamelCase ( Generic[_T] ):
'''simple docstring'''
def __init__(self , _lowerCamelCase = None ):
"""simple docstring"""
UpperCAmelCase__ : list[_T] = list(iterable or [] )
UpperCAmelCase__ : list[_T] = []
def __len__(self ):
"""simple docstring"""
return len(self._stacka ) + len(self._stacka )
def __repr__(self ):
"""simple docstring"""
return F"""Queue({tuple(self._stacka[::-1] + self._stacka )})"""
def _a (self , _lowerCamelCase ):
"""simple docstring"""
self._stacka.append(SCREAMING_SNAKE_CASE__ )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Dict = self._stacka.pop
UpperCAmelCase__ : List[Any] = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError("""Queue is empty""" )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
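    # Usage sketch (added): FIFO behavior of the two-stack queue.
    queue = QueueByTwoStacks([10, 20, 30])
    queue.put(40)
    assert queue.get() == 10
    assert queue.get() == 20
    assert len(queue) == 2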
# ---------------------------------------------------------------------------
from typing import List, Optional, Union

import numpy as np
import tensorflow as tf

from .utils import logging


logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Deal with dynamic shapes in TensorFlow cleanly."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # The tiny epsilon keeps XLA from folding the op away.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF.
    # If end_dim or start_dim is negative, count them from the end.
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    """Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
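# Quick check (added; requires TensorFlow at runtime): flatten mirrors
# torch.flatten semantics, collapsing dims 1..2 of a (2, 3, 4) tensor.
if __name__ == "__main__":
    x = tf.reshape(tf.range(24), (2, 3, 4))
    assert shape_list(flatten(x, start_dim=1)) == [2, 12]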
# ---------------------------------------------------------------------------
"""simple docstring"""
import requests
def __A ( a_ :Any , a_ :Dict) -> None:
__a : Union[str, Any] = {'''Content-Type''': '''application/json'''}
__a : Union[str, Any] = requests.post(_UpperCamelCase , json={'''text''': message_body} , headers=_UpperCamelCase)
if response.status_code != 2_00:
__a : List[Any] = (
'''Request to slack returned an error '''
F"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(_UpperCamelCase)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''') | 160 |
def solution(n: int = 4000000) -> int:
    """Project Euler 2: sum of the even Fibonacci numbers not exceeding n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
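# Constant-space sketch (added): every third Fibonacci number is even, and
# consecutive even Fibonacci numbers satisfy E(k) = 4 * E(k-1) + E(k-2),
# so we can step through them directly without storing a list.
def solution_direct(n: int = 4000000) -> int:
    total = 0
    a, b = 2, 8  # the first two even Fibonacci numbers
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total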
# ---------------------------------------------------------------------------
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py

import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile


def check_correctness(check_program, timeout, task_id, completion_id):
    """
    Evaluates the functional correctness of a completion by running the test
    suite provided in the problem.
    """
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir


@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)


def reliability_guard(maximum_memory_bytes=None):
    """
    Disables various destructive functions so the generated code cannot easily
    interfere with the test (e.g. fork bombs, killing other processes,
    removing filesystem files). This is NOT a security sandbox.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
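# Usage sketch (added): evaluate one candidate program against its own test
# with a one-second limit. The __main__ guard matters because
# check_correctness spawns a worker process.
if __name__ == "__main__":
    demo_program = "def add(a, b):\n    return a + b\n\nassert add(2, 3) == 5\n"
    print(check_correctness(demo_program, timeout=1.0, task_id="demo/0", completion_id=0))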
# ---------------------------------------------------------------------------
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union[PreTrainedTokenizerBase, FeatureExtractionMixin],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
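# Usage sketch (added): the defaults above describe the language variant of
# Perceiver, e.g. 256 latents of width 1280 and a 262-symbol byte vocabulary.
if __name__ == "__main__":
    config = PerceiverConfig()
    print(config.num_latents, config.d_latents, config.vocab_size)  # 256 1280 262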
# ---------------------------------------------------------------------------
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__A = logging.get_logger(__name__)
__A = {
'''tensor(bool)''': np.bool_,
'''tensor(int8)''': np.inta,
'''tensor(uint8)''': np.uinta,
'''tensor(int16)''': np.intaa,
'''tensor(uint16)''': np.uintaa,
'''tensor(int32)''': np.intaa,
'''tensor(uint32)''': np.uintaa,
'''tensor(int64)''': np.intaa,
'''tensor(uint64)''': np.uintaa,
'''tensor(float16)''': np.floataa,
'''tensor(float)''': np.floataa,
'''tensor(double)''': np.floataa,
}
class _snake_case :
def __init__( self : Optional[int] , UpperCAmelCase : List[str]=None , **UpperCAmelCase : List[str] ):
logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
__lowerCamelCase : int = model
__lowerCamelCase : List[str] = kwargs.get("model_save_dir" , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Optional[Any] = kwargs.get("latest_model_name" , SCREAMING_SNAKE_CASE__ )
def __call__( self : Union[str, Any] , **UpperCAmelCase : Tuple ):
__lowerCamelCase : List[Any] = {k: np.array(SCREAMING_SNAKE_CASE__ ) for k, v in kwargs.items()}
return self.model.run(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@staticmethod
def lowerCamelCase__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Union[str, Any]=None ):
if provider is None:
logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
__lowerCamelCase : Optional[Any] = '''CPUExecutionProvider'''
return ort.InferenceSession(SCREAMING_SNAKE_CASE__ , providers=[provider] , sess_options=SCREAMING_SNAKE_CASE__ )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : Optional[int] ):
__lowerCamelCase : Dict = file_name if file_name is not None else ONNX_WEIGHTS_NAME
__lowerCamelCase : Union[str, Any] = self.model_save_dir.joinpath(self.latest_model_name )
__lowerCamelCase : Any = Path(SCREAMING_SNAKE_CASE__ ).joinpath(SCREAMING_SNAKE_CASE__ )
try:
shutil.copyfile(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
__lowerCamelCase : int = self.model_save_dir.joinpath(SCREAMING_SNAKE_CASE__ )
if src_path.exists():
__lowerCamelCase : int = Path(SCREAMING_SNAKE_CASE__ ).joinpath(SCREAMING_SNAKE_CASE__ )
try:
shutil.copyfile(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
except shutil.SameFileError:
pass
def lowerCamelCase__ ( self : int , UpperCAmelCase : int , **UpperCAmelCase : Optional[int] , ):
if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
logger.error(F"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
# saving model weights/files
self._save_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@classmethod
def lowerCamelCase__ ( cls : int , UpperCAmelCase : int , UpperCAmelCase : Any = None , UpperCAmelCase : List[str] = None , UpperCAmelCase : str = False , UpperCAmelCase : Union[str, Any] = None , UpperCAmelCase : List[Any] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : Optional[int] , ):
__lowerCamelCase : Any = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : Tuple = OnnxRuntimeModel.load_model(
os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , provider=SCREAMING_SNAKE_CASE__ , sess_options=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : int = Path(SCREAMING_SNAKE_CASE__ )
# load model from hub
else:
# download model
__lowerCamelCase : int = hf_hub_download(
repo_id=SCREAMING_SNAKE_CASE__ , filename=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , revision=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase : str = Path(SCREAMING_SNAKE_CASE__ ).parent
__lowerCamelCase : Optional[int] = Path(SCREAMING_SNAKE_CASE__ ).name
__lowerCamelCase : Dict = OnnxRuntimeModel.load_model(SCREAMING_SNAKE_CASE__ , provider=SCREAMING_SNAKE_CASE__ , sess_options=SCREAMING_SNAKE_CASE__ )
return cls(model=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@classmethod
def lowerCamelCase__ ( cls : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] = True , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Tuple = None , **UpperCAmelCase : Optional[int] , ):
__lowerCamelCase : int = None
if len(str(SCREAMING_SNAKE_CASE__ ).split("@" ) ) == 2:
__lowerCamelCase : List[Any] = model_id.split("@" )
return cls._from_pretrained(
model_id=SCREAMING_SNAKE_CASE__ , revision=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) | 135 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters
    return upper and lower and num and spec_char


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print("Alternative Password generated:", alternative_password_generator(chars_incl, length))
    print("[If you are thinking of using this password, You better save it.]")


if __name__ == "__main__":
    main()
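# Quick sketch (added): generate a candidate password and vet it with the
# strength check defined above.
def demo() -> None:
    candidate = password_generator(12)
    print(candidate, is_strong_password(candidate))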
# ---------------------------------------------------------------------------
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
lowercase__ : Tuple = None
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : Optional[int] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase__ : Union[str, Any] = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
lowercase__ : Any = {
'''albert-base-v1''': 5_12,
'''albert-large-v1''': 5_12,
'''albert-xlarge-v1''': 5_12,
'''albert-xxlarge-v1''': 5_12,
'''albert-base-v2''': 5_12,
'''albert-large-v2''': 5_12,
'''albert-xlarge-v2''': 5_12,
'''albert-xxlarge-v2''': 5_12,
}
lowercase__ : int = '''▁'''
class _UpperCAmelCase ( A__):
_lowerCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
_lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase : Union[str, Any] = AlbertTokenizer
def __init__( self : str , lowercase_ : str=None , lowercase_ : Optional[int]=None , lowercase_ : Dict=True , lowercase_ : List[str]=True , lowercase_ : Tuple=False , lowercase_ : Union[str, Any]="[CLS]" , lowercase_ : str="[SEP]" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : Optional[Any]="[SEP]" , lowercase_ : List[Any]="<pad>" , lowercase_ : Any="[CLS]" , lowercase_ : Optional[Any]="[MASK]" , **lowercase_ : List[str] , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
snake_case_ : Optional[int] = (
AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ , normalized=SCREAMING_SNAKE_CASE__ )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else mask_token
)
super().__init__(
SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , remove_space=SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
snake_case_ : Union[str, Any] = do_lower_case
snake_case_ : List[str] = remove_space
snake_case_ : Union[str, Any] = keep_accents
snake_case_ : int = vocab_file
snake_case_ : List[str] = False if not self.vocab_file else True
def _snake_case ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : List[str] = None ):
snake_case_ : Optional[Any] = [self.sep_token_id]
snake_case_ : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _snake_case ( self : str , lowercase_ : int , lowercase_ : str = None ):
snake_case_ : Tuple = [self.sep_token_id]
snake_case_ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _snake_case ( self : Any , lowercase_ : Any , lowercase_ : Optional[Any] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
snake_case_ : Any = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
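# Hedged usage sketch for the fast Albert tokenizer defined above. The
# `from_pretrained` entry point and the checkpoint name are the standard Hugging
# Face API; the call downloads the tokenizer files, hence the main guard.
if __name__ == "__main__":
    from transformers import AlbertTokenizerFast

    tokenizer = AlbertTokenizerFast.from_pretrained('albert-base-v2')
    ids = tokenizer.build_inputs_with_special_tokens(
        tokenizer.encode('hello world', add_special_tokens=False)
    )
    print(tokenizer.convert_ids_to_tokens(ids))  # ['[CLS]', ..., '[SEP]']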
| 264 |
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """simple docstring"""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """simple docstring"""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """simple docstring"""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('''Q''', end=''' ''')
            else:
                print('''.''', end=''' ''')
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
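# Sanity check: the 8-queens puzzle has exactly 92 distinct solutions, so the
# final print above is expected to report 92.
assert len(solution) == 92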
| 337 | 0 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Tuple) ->Optional[Any]:
'''simple docstring'''
A__ = 3
A__ = 250
A__ = ids_tensor((batch_size, length) , SCREAMING_SNAKE_CASE__)
A__ = torch.ones((batch_size, length) , device=SCREAMING_SNAKE_CASE__ , dtype=torch.float) / length
return input_ids, scores
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any:
'''simple docstring'''
A__ = self._get_tensors(5)
A__ = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10),
MaxTimeCriteria(max_time=0.1),
])
self.assertFalse(criteria(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__))
A__ = self._get_tensors(9)
self.assertFalse(criteria(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__))
A__ = self._get_tensors(10)
self.assertTrue(criteria(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__))
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any:
'''simple docstring'''
A__ = MaxLengthCriteria(max_length=10)
A__ = self._get_tensors(5)
self.assertFalse(criteria(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__))
A__ = self._get_tensors(9)
self.assertFalse(criteria(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__))
A__ = self._get_tensors(10)
self.assertTrue(criteria(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__))
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]:
'''simple docstring'''
A__ = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5)
A__ = self._get_tensors(5)
self.assertFalse(criteria(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__))
A__ = self._get_tensors(9)
self.assertFalse(criteria(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__))
A__ = self._get_tensors(10)
self.assertTrue(criteria(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__))
A__ = StoppingCriteriaList([criteria])
self.assertEqual(criteria_list.max_length , 10)
def SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
'''simple docstring'''
A__ = self._get_tensors(5)
A__ = MaxTimeCriteria(max_time=0.1)
self.assertFalse(criteria(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__))
A__ = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2)
self.assertTrue(criteria(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__))
def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]:
'''simple docstring'''
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]) , 10)
with self.assertWarns(SCREAMING_SNAKE_CASE__):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]) , 11)
A__ = validate_stopping_criteria(StoppingCriteriaList() , 11)
self.assertEqual(len(SCREAMING_SNAKE_CASE__) , 1)
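# Hedged sketch of the non-test usage of these criteria: `generate` accepts a
# `StoppingCriteriaList` directly (standard `transformers` API; the gpt2 weights
# are downloaded on first run, hence the main guard).
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('gpt2')
    model = AutoModelForCausalLM.from_pretrained('gpt2')
    inputs = tokenizer('Hello,', return_tensors='pt')
    criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
    output = model.generate(**inputs, stopping_criteria=criteria)
    print(tokenizer.decode(output[0]))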
| 14 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__a = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
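# Minimal sketch of what the lazy structure above provides: attribute access on
# the package resolves the real submodule only on demand.
if __name__ == "__main__":
    from transformers import CTRLConfig  # triggers the lazy import machinery

    print(CTRLConfig().model_type)  # "ctrl"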
| 337 | 0 |
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
_lowerCAmelCase :Tuple = argparse.ArgumentParser()
parser.add_argument('--user', type=str, default='ubuntu')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('--key_path', type=str, default=None)
parser.add_argument('--instance', type=str, default='V100:1')
parser.add_argument('--provider', type=str, default='cheapest')
parser.add_argument('--use_spot', type=bool, default=False)
parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
_lowerCAmelCase,_lowerCAmelCase :List[Any] = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('Cannot specify both BYO and on-demand cluster args')
_lowerCAmelCase :int = rh.cluster(
name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
)
else:
_lowerCAmelCase :List[str] = rh.cluster(
name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
_lowerCAmelCase :Optional[Any] = args.example.rsplit('/', 1)[0]
# Set up remote environment
cluster.install_packages(['pip:./']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 263 |
from collections.abc import Callable
class Heap:
    """simple docstring"""

    def __init__(self, key: Callable = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key: Callable = key or (lambda x: x)

    def _parent(self, i: int):
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int):
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int):
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent

    def _heapify_up(self, index: int) -> None:
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change, so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value) -> None:
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        return self.arr[0] if self.size else None

    def extract_top(self):
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    """simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
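# Small demonstration, assuming the reconstructed ordering above (a max-heap with
# the identity key by default):
if __name__ == "__main__":
    h = Heap()
    h.insert_item('a', 34)
    h.insert_item('b', 31)
    h.insert_item('c', 37)
    print(h.get_top())       # ['c', 37]
    h.update_item('b', 100)  # 'b' bubbles to the top
    print(h.extract_top())   # ['b', 100]
    print(h.get_top())       # ['c', 37]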
| 337 | 0 |
def binary_recursive(decimal: int) -> str:
    '''simple docstring'''
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    '''simple docstring'''
    number = str(number).strip()
    if not number:
        raise ValueError('''No input value was provided''')
    negative = '''-''' if number.startswith('''-''') else ''''''
    number = number.lstrip('''-''')
    if not number.isnumeric():
        raise ValueError('''Input value is not an integer''')
    return f'{negative}0b{binary_recursive(int(number))}'
if __name__ == "__main__":
from doctest import testmod
testmod()
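# Worked example, assuming the de-mangled names above:
if __name__ == "__main__":
    print(main("7"))             # 0b111
    print(main("-11"))           # -0b1011
    print(binary_recursive(10))  # 1010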
| 226 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
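# Hedged usage sketch for the exported pipeline; "damo-vilab/text-to-video-ms-1.7b"
# is the reference checkpoint and the call downloads several GB of weights:
#
#   import torch
#   from diffusers import TextToVideoSDPipeline
#
#   pipe = TextToVideoSDPipeline.from_pretrained(
#       "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16
#   ).to("cuda")
#   frames = pipe("a panda playing guitar", num_inference_steps=25).frames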
| 337 | 0 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase : List[Any] =False
class a_ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class a_ ( unittest.TestCase ):
def lowercase__ ( self : Tuple ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Dict ):
"""simple docstring"""
lowercase_ :Union[str, Any] = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowercase_ :str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
lowercase_ :Optional[int] = torch.manual_seed(0 )
lowercase_ :int = pipe.dual_guided(
prompt="first prompt" , image=SCREAMING_SNAKE_CASE__ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase_ :Dict = VersatileDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE__ , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowercase_ :List[Any] = generator.manual_seed(0 )
lowercase_ :Tuple = pipe.dual_guided(
prompt="first prompt" , image=SCREAMING_SNAKE_CASE__ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def lowercase__ ( self : Tuple ):
"""simple docstring"""
lowercase_ :Optional[Any] = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowercase_ :str = '''cyberpunk 2077'''
lowercase_ :Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
lowercase_ :Optional[int] = torch.manual_seed(0 )
lowercase_ :List[Any] = pipe.dual_guided(
prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
lowercase_ :List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase_ :Any = np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowercase_ :Optional[int] = '''A painting of a squirrel eating a burger '''
lowercase_ :int = torch.manual_seed(0 )
lowercase_ :int = pipe.text_to_image(
prompt=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
lowercase_ :Union[str, Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase_ :List[str] = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowercase_ :Tuple = pipe.image_variation(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , output_type="numpy" ).images
lowercase_ :Optional[int] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase_ :Optional[int] = np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 223 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__a = logging.get_logger(__name__)
__a = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__a = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
__a = {'''facebook/blenderbot-3B''': 1_28}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Dict = VOCAB_FILES_NAMES
A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Optional[int] = ['input_ids', 'attention_mask']
A : str = BlenderbotTokenizer
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="replace" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowercase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
lowercase : List[Any] = getattr(SCREAMING_SNAKE_CASE__ , pre_tok_state.pop('''type''' ) )
lowercase : str = add_prefix_space
lowercase : List[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = add_prefix_space
lowercase : str = '''post_processor'''
lowercase : str = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if tokenizer_component_instance:
lowercase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
lowercase : Tuple = tuple(state['''sep'''] )
if "cls" in state:
lowercase : Union[str, Any] = tuple(state['''cls'''] )
lowercase : Optional[int] = False
if state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
lowercase : Any = add_prefix_space
lowercase : Tuple = True
if state.get('''trim_offsets''' , SCREAMING_SNAKE_CASE__ ) != trim_offsets:
lowercase : List[str] = trim_offsets
lowercase : Optional[int] = True
if changes_to_apply:
lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ , state.pop('''type''' ) )
lowercase : Union[str, Any] = component_class(**SCREAMING_SNAKE_CASE__ )
setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __lowerCamelCase ( self ):
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else value
lowercase : Any = value
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : Any = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : int = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : Tuple = [self.sep_token_id]
lowercase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
return token_ids_a + [self.eos_token_id]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to add a space prefix, as is done inside Blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = ''' '''.join(SCREAMING_SNAKE_CASE__ )
lowercase : Any = self.encode(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > self.model_max_length:
lowercase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
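# Hedged usage sketch: only the (small) tokenizer files are fetched here, and
# build_inputs_with_special_tokens above appends the </s> end marker.
if __name__ == "__main__":
    from transformers import BlenderbotTokenizerFast

    tok = BlenderbotTokenizerFast.from_pretrained('facebook/blenderbot-3B')
    ids = tok(' Hello world').input_ids
    print(tok.convert_ids_to_tokens(ids))  # [..., '</s>']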
| 337 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase :
lowerCAmelCase_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCAmelCase_ = field(
default=A__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCAmelCase_ = field(
default="NER" , metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
lowerCAmelCase_ = field(
default=A__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCAmelCase_ = field(default=A__ , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowerCAmelCase_ = field(
default=A__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class lowerCAmelCase :
lowerCAmelCase_ = field(
metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
lowerCAmelCase_ = field(
default=A__ , metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} , )
lowerCAmelCase_ = field(
default=1_2_8 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowerCAmelCase_ = field(
default=A__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def __UpperCamelCase ( ):
'''simple docstring'''
__lowercase =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowercase =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowercase =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
__lowercase =import_module('tasks' )
try:
__lowercase =getattr(_UpperCamelCase, model_args.task_type )
__lowercase =token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s', _UpperCamelCase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
__lowercase =token_classification_task.get_labels(data_args.labels )
__lowercase =dict(enumerate(_UpperCamelCase ) )
__lowercase =len(_UpperCamelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=_UpperCamelCase, idalabel=_UpperCamelCase, labelaid={label: i for i, label in enumerate(_UpperCamelCase )}, cache_dir=model_args.cache_dir, )
__lowercase =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, )
__lowercase =AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path ), config=_UpperCamelCase, cache_dir=model_args.cache_dir, )
# Get datasets
__lowercase =(
TokenClassificationDataset(
token_classification_task=_UpperCamelCase, data_dir=data_args.data_dir, tokenizer=_UpperCamelCase, labels=_UpperCamelCase, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
if training_args.do_train
else None
)
__lowercase =(
TokenClassificationDataset(
token_classification_task=_UpperCamelCase, data_dir=data_args.data_dir, tokenizer=_UpperCamelCase, labels=_UpperCamelCase, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
if training_args.do_eval
else None
)
def align_predictions(lowercase__ : List[Any], lowercase__ : List[str] ) -> Tuple[List[int], List[int]]:
__lowercase =np.argmax(_UpperCamelCase, axis=2 )
__lowercase =preds.shape
__lowercase =[[] for _ in range(_UpperCamelCase )]
__lowercase =[[] for _ in range(_UpperCamelCase )]
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(lowercase__ : Any ) -> Dict:
__lowercase =align_predictions(p.predictions, p.label_ids )
return {
"accuracy_score": accuracy_score(_UpperCamelCase, _UpperCamelCase ),
"precision": precision_score(_UpperCamelCase, _UpperCamelCase ),
"recall": recall_score(_UpperCamelCase, _UpperCamelCase ),
"f1": fa_score(_UpperCamelCase, _UpperCamelCase ),
}
# Data collator
__lowercase =DataCollatorWithPadding(_UpperCamelCase, pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__lowercase =Trainer(
model=_UpperCamelCase, args=_UpperCamelCase, train_dataset=_UpperCamelCase, eval_dataset=_UpperCamelCase, compute_metrics=_UpperCamelCase, data_collator=_UpperCamelCase, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase ={}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__lowercase =trainer.evaluate()
__lowercase =os.path.join(training_args.output_dir, 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(_UpperCamelCase, 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s', _UpperCamelCase, _UpperCamelCase )
writer.write('%s = %s\n' % (key, value) )
results.update(_UpperCamelCase )
# Predict
if training_args.do_predict:
__lowercase =TokenClassificationDataset(
token_classification_task=_UpperCamelCase, data_dir=data_args.data_dir, tokenizer=_UpperCamelCase, labels=_UpperCamelCase, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, )
__lowercase =trainer.predict(_UpperCamelCase )
__lowercase =align_predictions(_UpperCamelCase, _UpperCamelCase )
__lowercase =os.path.join(training_args.output_dir, 'test_results.txt' )
if trainer.is_world_process_zero():
with open(_UpperCamelCase, 'w' ) as writer:
for key, value in metrics.items():
logger.info(' %s = %s', _UpperCamelCase, _UpperCamelCase )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
__lowercase =os.path.join(training_args.output_dir, 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(_UpperCamelCase, 'w' ) as writer:
with open(os.path.join(data_args.data_dir, 'test.txt' ), 'r' ) as f:
token_classification_task.write_predictions_to_file(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
return results
def __UpperCamelCase ( lowercase__ : Dict ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
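# Hedged invocation sketch (the script name is illustrative; the flags come from
# the dataclasses above and from TrainingArguments):
#
#   python run_ner.py \
#     --model_name_or_path bert-base-cased \
#     --data_dir /path/to/conll2003 \
#     --labels /path/to/labels.txt \
#     --output_dir /tmp/ner-out \
#     --max_seq_length 128 \
#     --do_train --do_eval --do_predict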
| 141 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def __lowercase ( ) ->int:
"""simple docstring"""
lowercase : Tuple = HfArgumentParser(_UpperCamelCase )
lowercase : List[str] = parser.parse_args_into_dataclasses()[0]
lowercase : Optional[int] = TensorFlowBenchmark(args=_UpperCamelCase )
try:
lowercase : Any = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowercase : Optional[int] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
lowercase : Any = ''' '''.join(str(_UpperCamelCase ).split(''' ''' )[:-1] )
lowercase : Any = ''''''
lowercase : str = eval(str(_UpperCamelCase ).split(''' ''' )[-1] )
lowercase : List[str] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
lowercase : Union[str, Any] = full_error_msg + begin_error_msg + str(_UpperCamelCase )
raise ValueError(_UpperCamelCase )
benchmark.run()
if __name__ == "__main__":
main()
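# Hedged invocation sketch; --models, --batch_sizes and --sequence_lengths are
# standard benchmark arguments (the script name is illustrative):
#
#   python run_benchmark_tf.py --models bert-base-uncased \
#       --batch_sizes 8 --sequence_lengths 128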
| 337 | 0 |
'''simple docstring'''
def catalan_number(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
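# Quick check of the recurrence above against the first Catalan numbers:
if __name__ == "__main__":
    print([catalan_number(n) for n in range(1, 7)])  # [1, 1, 2, 5, 14, 42]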
| 311 |
def bfs(graph, s, t, parent) -> bool:
    """simple docstring"""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink) -> int:
    """simple docstring"""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float('''Inf''')
        s = sink
        while s != source:
            # Find the minimum value along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
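# For this classic 6-node network the maximum flow from node 0 to node 5 is 23,
# so the print above is expected to output 23.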
| 337 | 0 |
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def a__ ( lowerCAmelCase ) -> str:
UpperCAmelCase__ : List[str] = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"""`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got """
F"""{test_file} instead.""" )
UpperCAmelCase__ : Optional[int] = components[-1]
if not test_fn.endswith("""py""" ):
raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" )
if not test_fn.startswith("""test_modeling_""" ):
raise ValueError(
F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
UpperCAmelCase__ : List[Any] = components[:-1] + [test_fn.replace(""".py""" , """""" )]
UpperCAmelCase__ : Union[str, Any] = '''.'''.join(_UpperCamelCase )
return test_module_path
def a__ ( lowerCAmelCase ) -> int:
UpperCAmelCase__ : Any = get_module_path(_UpperCamelCase )
UpperCAmelCase__ : Any = importlib.import_module(_UpperCamelCase )
return test_module
def a__ ( lowerCAmelCase ) -> List[Any]:
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : List[Any] = get_test_module(_UpperCamelCase )
for attr in dir(_UpperCamelCase ):
if attr.endswith("""ModelTester""" ):
tester_classes.append(getattr(_UpperCamelCase , _UpperCamelCase ) )
# sort with class names
return sorted(_UpperCamelCase , key=lambda lowerCAmelCase : x.__name__ )
def a__ ( lowerCAmelCase ) -> Optional[int]:
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : Optional[Any] = get_test_module(_UpperCamelCase )
for attr in dir(_UpperCamelCase ):
UpperCAmelCase__ : Optional[int] = getattr(_UpperCamelCase , _UpperCamelCase )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
UpperCAmelCase__ : List[str] = getattr(_UpperCamelCase , """all_model_classes""" , [] )
if len(_UpperCamelCase ) > 0:
test_classes.append(_UpperCamelCase )
# sort with class names
return sorted(_UpperCamelCase , key=lambda lowerCAmelCase : x.__name__ )
def a__ ( lowerCAmelCase ) -> Tuple:
UpperCAmelCase__ : int = get_test_classes(_UpperCamelCase )
UpperCAmelCase__ : str = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(_UpperCamelCase , key=lambda lowerCAmelCase : x.__name__ )
def a__ ( lowerCAmelCase ) -> List[str]:
UpperCAmelCase__ : Optional[int] = test_class()
if hasattr(_UpperCamelCase , """setUp""" ):
test.setUp()
UpperCAmelCase__ : str = None
if hasattr(_UpperCamelCase , """model_tester""" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
UpperCAmelCase__ : str = test.model_tester.__class__
return model_tester
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> str:
UpperCAmelCase__ : Any = get_test_classes(_UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(_UpperCamelCase )
# sort with class names
return sorted(_UpperCamelCase , key=lambda lowerCAmelCase : x.__name__ )
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> Optional[int]:
UpperCAmelCase__ : List[str] = get_test_classes_for_model(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase__ : List[str] = []
for test_class in test_classes:
UpperCAmelCase__ : str = get_model_tester_from_test_class(_UpperCamelCase )
if tester_class is not None:
tester_classes.append(_UpperCamelCase )
# sort with class names
return sorted(_UpperCamelCase , key=lambda lowerCAmelCase : x.__name__ )
def a__ ( lowerCAmelCase ) -> str:
UpperCAmelCase__ : int = get_test_classes(_UpperCamelCase )
UpperCAmelCase__ : List[str] = {test_class: get_model_tester_from_test_class(_UpperCamelCase ) for test_class in test_classes}
return test_tester_mapping
def a__ ( lowerCAmelCase ) -> str:
UpperCAmelCase__ : Any = get_model_classes(_UpperCamelCase )
UpperCAmelCase__ : Dict = {
model_class: get_test_classes_for_model(_UpperCamelCase , _UpperCamelCase ) for model_class in model_classes
}
return model_test_mapping
def a__ ( lowerCAmelCase ) -> Union[str, Any]:
UpperCAmelCase__ : str = get_model_classes(_UpperCamelCase )
UpperCAmelCase__ : List[str] = {
model_class: get_tester_classes_for_model(_UpperCamelCase , _UpperCamelCase ) for model_class in model_classes
}
return model_to_tester_mapping
def a__ ( lowerCAmelCase ) -> str:
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return o
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
return o.__name__
elif isinstance(_UpperCamelCase , (list, tuple) ):
return [to_json(_UpperCamelCase ) for x in o]
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
return {to_json(_UpperCamelCase ): to_json(_UpperCamelCase ) for k, v in o.items()}
else:
return o
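# Hedged usage sketch, assuming the upstream names referenced inside the bodies
# above (get_test_classes, get_model_tester_from_test_class) for the mangled
# `a__` definitions; the path must exist inside a transformers checkout:
#
#   test_file = "tests/models/bert/test_modeling_bert.py"
#   for test_class in get_test_classes(test_file):
#       print(test_class.__name__, get_model_tester_from_test_class(test_class))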
| 171 |
from typing import List
from .keymap import KEYMAP, get_character
def __lowercase ( _UpperCamelCase ) ->int:
"""simple docstring"""
def decorator(_UpperCamelCase ):
lowercase : str = getattr(_UpperCamelCase, '''handle_key''', [] )
handle += [key]
setattr(_UpperCamelCase, '''handle_key''', _UpperCamelCase )
return func
return decorator
def __lowercase ( *_UpperCamelCase ) ->Any:
"""simple docstring"""
def decorator(_UpperCamelCase ):
lowercase : List[Any] = getattr(_UpperCamelCase, '''handle_key''', [] )
handle += keys
setattr(_UpperCamelCase, '''handle_key''', _UpperCamelCase )
return func
return decorator
class __SCREAMING_SNAKE_CASE ( A__ ):
def __new__( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : str = super().__new__(cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not hasattr(SCREAMING_SNAKE_CASE__ , '''key_handler''' ):
setattr(SCREAMING_SNAKE_CASE__ , '''key_handler''' , {} )
setattr(SCREAMING_SNAKE_CASE__ , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
lowercase : Dict = getattr(SCREAMING_SNAKE_CASE__ , '''handle_key''' , [] )
for key in handled_keys:
lowercase : List[Any] = value
return new_cls
@staticmethod
def __lowerCamelCase ( cls ):
lowercase : Dict = get_character()
if char != KEYMAP["undefined"]:
lowercase : Optional[int] = ord(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = cls.key_handler.get(SCREAMING_SNAKE_CASE__ )
if handler:
lowercase : Tuple = char
return handler(cls )
else:
return None
def __lowercase ( cls ) ->Any:
"""simple docstring"""
return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
| 337 | 0 |
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 160 |
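# Short demonstration of the filter constructors above, feeding samples one at a
# time through the underlying IIRFilter (process() is its standard method):
if __name__ == "__main__":
    lowpass = make_lowpass(1_000, 48_000)
    print([round(lowpass.process(s), 6) for s in (0.0, 1.0, 0.5)])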
import logging
import os
from .state import PartialState
class __SCREAMING_SNAKE_CASE ( logging.LoggerAdapter ):
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ ):
lowercase : List[Any] = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
lowercase : List[str] = kwargs.pop('''main_process_only''' , SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = kwargs.pop('''in_order''' , SCREAMING_SNAKE_CASE__ )
if self.isEnabledFor(SCREAMING_SNAKE_CASE__ ):
if self._should_log(SCREAMING_SNAKE_CASE__ ):
lowercase , lowercase : str = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
elif in_order:
lowercase : List[Any] = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
lowercase , lowercase : Union[str, Any] = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
state.wait_for_everyone()
def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->List[Any]:
"""simple docstring"""
if log_level is None:
lowercase : str = os.environ.get('''ACCELERATE_LOG_LEVEL''', _UpperCamelCase )
lowercase : str = logging.getLogger(_UpperCamelCase )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(_UpperCamelCase, {} )
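# Hedged usage sketch: upstream this factory is exposed as
# accelerate.logging.get_logger, and it requires Accelerator()/PartialState()
# to have been constructed first (otherwise log() raises the RuntimeError above).
#
#   from accelerate import Accelerator
#   from accelerate.logging import get_logger
#
#   accelerator = Accelerator()
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed once per run", main_process_only=True)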
| 337 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case :
"""simple docstring"""
def __init__( self : Any , lowerCamelCase : Tuple , lowerCamelCase : Any=13 , lowerCamelCase : Any=32 , lowerCamelCase : Optional[Any]=3 , lowerCamelCase : str=4 , lowerCamelCase : str=[10, 20, 30, 40] , lowerCamelCase : int=[2, 2, 3, 2] , lowerCamelCase : List[str]=True , lowerCamelCase : Optional[Any]=True , lowerCamelCase : List[Any]=37 , lowerCamelCase : Optional[Any]="gelu" , lowerCamelCase : Tuple=10 , lowerCamelCase : Tuple=0.02 , lowerCamelCase : List[Any]=["stage2", "stage3", "stage4"] , lowerCamelCase : List[str]=[2, 3, 4] , lowerCamelCase : Any=None , ) -> Optional[int]:
lowerCAmelCase_ : List[Any] = parent
lowerCAmelCase_ : Optional[Any] = batch_size
lowerCAmelCase_ : Dict = image_size
lowerCAmelCase_ : Optional[int] = num_channels
lowerCAmelCase_ : Optional[int] = num_stages
lowerCAmelCase_ : List[str] = hidden_sizes
lowerCAmelCase_ : Optional[int] = depths
lowerCAmelCase_ : Tuple = is_training
lowerCAmelCase_ : Tuple = use_labels
lowerCAmelCase_ : Any = intermediate_size
lowerCAmelCase_ : Any = hidden_act
lowerCAmelCase_ : Union[str, Any] = num_labels
lowerCAmelCase_ : Optional[int] = initializer_range
lowerCAmelCase_ : Union[str, Any] = out_features
lowerCAmelCase_ : str = out_indices
lowerCAmelCase_ : Any = scope
def __lowercase ( self : Dict ) -> List[Any]:
lowerCAmelCase_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : Optional[int] = None
if self.use_labels:
lowerCAmelCase_ : int = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase_ : str = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Tuple ) -> int:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __lowercase ( self : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : List[str] ) -> Dict:
lowerCAmelCase_ : Optional[Any] = ConvNextModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCAmelCase_ : Dict = model(SCREAMING_SNAKE_CASE__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __lowercase ( self : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] ) -> Tuple:
lowerCAmelCase_ : Union[str, Any] = ConvNextForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCAmelCase_ : List[str] = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self : Tuple , lowerCamelCase : List[str] , lowerCamelCase : Dict , lowerCamelCase : str ) -> int:
lowerCAmelCase_ : Optional[Any] = ConvNextBackbone(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCAmelCase_ : Any = model(SCREAMING_SNAKE_CASE__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCAmelCase_ : str = None
lowerCAmelCase_ : List[str] = ConvNextBackbone(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCAmelCase_ : List[str] = model(SCREAMING_SNAKE_CASE__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __lowercase ( self : int ) -> Optional[int]:
lowerCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
lowerCAmelCase_ : Any = config_and_inputs
lowerCAmelCase_ : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( A__ ,A__ ,unittest.TestCase):
"""simple docstring"""
lowercase = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
lowercase = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
lowercase = True
lowercase = False
lowercase = False
lowercase = False
lowercase = False
    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
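# Hypothetical local run of the suite above; the path assumes the usual
# transformers test layout and should be adjusted to where this file lives:
#
#   python -m pytest tests/models/convnext/test_modeling_convnext.py -q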
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because LightningModule requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__a = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
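# Illustrative invocation of this conversion script; the checkpoint and output
# paths are placeholders:
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoints/epoch=1.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-qa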
"""Finetuning the library models for multiple choice (Bert, Roberta, XLNet)."""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__A = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
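# Sketch of a possible command line for this trainer; paths are placeholders and
# the task name must be one of the keys registered in `processors` (e.g. "swag"):
#
#   python run_multiple_choice.py \
#       --task_name swag \
#       --model_name_or_path bert-base-uncased \
#       --data_dir ./data/swag \
#       --output_dir ./swag_out \
#       --do_train --do_eval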
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
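# Minimal round-trip sketch using the released `transformers.YolosConfig`, which
# the class above mirrors (the directory name is illustrative):
#
#   from transformers import YolosConfig
#
#   config = YolosConfig(num_detection_tokens=100)
#   config.save_pretrained("./yolos-config")                 # writes config.json
#   reloaded = YolosConfig.from_pretrained("./yolos-config")
#   assert reloaded.num_detection_tokens == 100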
""" SEW model configuration"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Any = logging.get_logger(__name__)
lowercase__ : Dict = {
'''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
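# Quick check against the released `transformers.SEWConfig`, which this mirrors:
# the default conv strides (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) multiply to
# 5 * 2**6 == 320 input samples per output frame.
#
#   from transformers import SEWConfig
#
#   cfg = SEWConfig()
#   assert cfg.inputs_to_logits_ratio == 320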
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__a = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
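# Example usage (the requirement strings are illustrative):
#
#   require_version("numpy>=1.17")       # no-op when satisfied, raises otherwise
#   require_version("python>=3.8")       # special-cased against sys.version_info
#   require_version_core("torch>=1.4")   # same check, with the core-install hint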
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
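# With this pattern, `import transformers.models.vit` stays cheap: the submodules
# listed in `_import_structure` are only imported on first attribute access, e.g.
#
#   from transformers.models.vit import ViTModel   # triggers the modeling_vit import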
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__a = logging.get_logger(__name__)
__a = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
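# Round-trip sketch against the released `transformers.DetaConfig`, which the
# class above mirrors:
#
#   from transformers import DetaConfig
#
#   config = DetaConfig(num_queries=300)
#   d = config.to_dict()      # the nested backbone config is serialized as a dict
#   assert d["backbone_config"]["model_type"] == "resnet"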
"""Binary tree traversal algorithms: pre-, in- and post-order, level order, and zigzag."""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    # Builds the tree used below: 1 at the root, 2 and 3 as its children,
    # and 4 and 5 as the children of 2.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])

    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output
def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")

    print(f"Height of Tree: {height(root)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")

    print("Level-wise order Traversal: ")

    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
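# Expected results for the five-node tree built by make_tree():
#   inorder(root)     -> [4, 2, 5, 1, 3]
#   preorder(root)    -> [1, 2, 4, 5, 3]
#   postorder(root)   -> [4, 5, 2, 3, 1]
#   height(root)      -> 3
#   level_order(root) -> [1, 2, 3, 4, 5]
#   zigzag(root)      -> [[1], [3, 2], [4, 5]]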
def solution() -> str:
    """Return the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
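# A cheaper variant that never materializes the full sum: only the last ten
# digits are kept, via three-argument pow(). The function name and signature are
# illustrative additions, not part of the original module.
def solution_mod(limit: int = 1000, digits: int = 10) -> str:
    modulus = 10**digits
    return str(sum(pow(i, i, modulus) for i in range(1, limit + 1)) % modulus).zfill(digits)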
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 5_1_2,
"facebook/dpr-ctx_encoder-multiset-base": 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 5_1_2,
"facebook/dpr-question_encoder-multiset-base": 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 5_1_2,
"facebook/dpr-reader-multiset-base": 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
              of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the first
              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
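# Hedged usage sketch (mirrors the documented transformers API; downloads a
# pretrained vocabulary on first use):
#
#   from transformers import DPRReaderTokenizerFast
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions="What is love?",
#       titles="Haddaway",
#       texts="'What Is Love' is a song recorded by the artist Haddaway",
#       return_tensors="pt",
#   )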
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)
    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
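# For context, the marker under test looks like this in real diffusers source
# (illustrative): a class body annotated with
#
#   # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test
#
# `check_copies.is_copy_consistent` re-derives the expected code from the named
# symbol (applying any `A->B` renames), returns the list of inconsistencies, and
# can rewrite the file in place when called with `overwrite=True`.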
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name to the corresponding torch.nn module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
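# Usage sketch:
#
#   act = get_activation("silu")
#   print(act)                  # SiLU()
#   y = act(torch.randn(4))     # assumes `import torch`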
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
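# Notes: floyd_warshall() runs in O(n^3) time and O(n^2) space; show_min() then
# returns the shortest-path weight (math.inf when no path exists). For the edges
# added above: graph.show_min(1, 4) == 11 (via 1 -> 3 -> 4) and
# graph.show_min(0, 3) == 16 (via 0 -> 2 -> 3); wrap the calls in print() to
# display them.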
"""A self-contained Matrix class supporting basic linear-algebra operations."""
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]
    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)
    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )
    def is_invertable(self) -> bool:
        return bool(self.determinant())
    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)
    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )
  def adjugate( self ):
    """simple docstring"""
    values = [
      [self.cofactors().rows[column][row] for column in range(self.num_columns )]
      for row in range(self.num_rows )
    ]
    return Matrix(values )
  def inverse( self ):
    """simple docstring"""
    determinant = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse' )
return self.adjugate() * (1 / determinant)
def __repr__( self : Tuple ):
"""simple docstring"""
return str(self.rows )
def __str__( self : Any ):
"""simple docstring"""
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
            '[' + '. '.join([str(value ) for value in row] ) + '.]'
for row in self.rows
] )
+ "]"
)
  def add_row( self , row : list , position : int | None = None ):
    """simple docstring"""
    type_error = TypeError('Row must be a list containing all ints and/or floats' )
    if not isinstance(row , list ):
      raise type_error
    for value in row:
      if not isinstance(value , (int, float) ):
        raise type_error
    if len(row ) != self.num_columns:
      raise ValueError(
        'Row must be equal in length to the other rows in the matrix' )
    if position is None:
      self.rows.append(row )
    else:
      self.rows = self.rows[0:position] + [row] + self.rows[position:]
  def add_column( self , column : list , position : int | None = None ):
    """simple docstring"""
    type_error = TypeError(
      'Column must be a list containing all ints and/or floats' )
    if not isinstance(column , list ):
      raise type_error
    for value in column:
      if not isinstance(value , (int, float) ):
        raise type_error
    if len(column ) != self.num_rows:
      raise ValueError(
        'Column must be equal in length to the other columns in the matrix' )
    if position is None:
      self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
    else:
      self.rows = [
        self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
        for i in range(self.num_rows )
      ]
  def __eq__( self , other : object ):
    """simple docstring"""
    if not isinstance(other , Matrix ):
return NotImplemented
return self.rows == other.rows
  def __ne__( self , other : object ):
"""simple docstring"""
return not self == other
def __neg__( self : str ):
"""simple docstring"""
return self * -1
  def __add__( self , other : Matrix ):
"""simple docstring"""
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
  def __sub__( self , other : Matrix ):
"""simple docstring"""
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
  def __mul__( self , other : Matrix | int | float ):
    """simple docstring"""
    if isinstance(other , (int, float) ):
      return Matrix(
        [[int(element * other ) for element in row] for row in self.rows] )
    elif isinstance(other , Matrix ):
      if self.num_columns != other.num_rows:
        raise ValueError(
          'The number of columns in the first matrix must '
          'be equal to the number of rows in the second' )
      return Matrix(
        [
          [Matrix.dot_product(row , column ) for column in other.columns()]
          for row in self.rows
        ] )
    else:
      raise TypeError(
        'A Matrix can only be multiplied by an int, float, or another matrix' )
  def __pow__( self , other : int ):
    """simple docstring"""
    if not isinstance(other , int ):
      raise TypeError('A Matrix can only be raised to the power of an int' )
    if not self.is_square:
      raise ValueError('Only square matrices can be raised to a power' )
    if other == 0:
      return self.identity()
    if other < 0:
      if self.is_invertable():
        return self.inverse() ** (-other)
      raise ValueError(
        'Only invertable matrices can be raised to a negative power' )
    result = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
  def dot_product( cls , row : list , column : list ):
    """simple docstring"""
    return sum(row[i] * column[i] for i in range(len(row ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
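# Quick sanity check of the Matrix class above on a hand-computed 2x2 example
# (the numbers are illustrative, not from the source):
# det([[4, 7], [2, 6]]) = 4*6 - 7*2 = 10, the adjugate is the transpose of the
# cofactor matrix, and multiplying by the identity must leave a matrix unchanged.
m = Matrix([[4, 7], [2, 6]])
assert m.determinant() == 10
assert m.adjugate() == Matrix([[6, -7], [-2, 4]])
assert m * m.identity() == m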
| 141 |
from __future__ import annotations
def __lowercase ( _UpperCamelCase ) ->float:
"""simple docstring"""
if not nums:
raise ValueError('''List is empty''' )
return sum(_UpperCamelCase ) / len(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
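# Tiny usage check for the mean helper above; the obfuscated name `__lowercase`
# is the row's own and is an ordinary callable identifier at module level.
assert __lowercase([2, 4, 6]) == 4.0
assert __lowercase([1.5, 2.5]) == 2.0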
| 337 | 0 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
UpperCAmelCase : Dict = flax_key_tuple[:-1] + ('''weight''',)
UpperCAmelCase : Dict = torch.permute(_UpperCamelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCamelCase ):
# linear layer
UpperCAmelCase : str = flax_key_tuple[:-1] + ('''weight''',)
UpperCAmelCase : Optional[Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCAmelCase : int = flax_key_tuple[:-1] + ('''weight''',)
return flax_key_tuple, flax_tensor
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
if "metadata" in layer:
UpperCAmelCase : str = layer.split("metadata" )
UpperCAmelCase : Optional[int] = ''''''.join(split_layer[0] )[:-1]
UpperCAmelCase : Tuple = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
UpperCAmelCase : Optional[int] = layer.split("kvstore" )
UpperCAmelCase : str = ''''''.join(split_layer[0] )[:-1]
UpperCAmelCase : Optional[int] = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
UpperCAmelCase : List[str] = layer.split("/" )
UpperCAmelCase : Dict = '''/'''.join(split_layer[:-1] )
UpperCAmelCase : Tuple = (split_layer[-1],)
if "kvstore/path" in layer:
UpperCAmelCase : Dict = F"{switch_checkpoint_path}/{checkpoint_info[layer]}"
elif "kvstore/driver" in layer:
UpperCAmelCase : Optional[int] = '''file'''
else:
UpperCAmelCase : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = rename_keys(_UpperCamelCase )
UpperCAmelCase : int = {}
for k, v in current_block.items():
UpperCAmelCase : Tuple = v
UpperCAmelCase : Optional[int] = new_current_block
torch.save(_UpperCamelCase , _UpperCamelCase )
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = WEIGHTS_NAME ):
'''simple docstring'''
UpperCAmelCase : str = convert_file_size_to_int(_UpperCamelCase )
UpperCAmelCase : Optional[Any] = []
UpperCAmelCase : List[str] = {}
UpperCAmelCase : Optional[int] = 0
UpperCAmelCase : Tuple = 0
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
UpperCAmelCase : Union[str, Any] = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
UpperCAmelCase : Any = flatten_dict(_UpperCamelCase , sep="/" )
UpperCAmelCase : str = {}
for layer in checkpoint_info.keys():
UpperCAmelCase : int = get_key_and_tensorstore_dict(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if curr_real_layer_name in all_layers:
UpperCAmelCase : Union[str, Any] = content
else:
UpperCAmelCase : Any = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
UpperCAmelCase : Union[str, Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
UpperCAmelCase : List[Any] = torch.tensor(_UpperCamelCase )
UpperCAmelCase : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
UpperCAmelCase : Optional[int] = rename_base_flax_keys(tuple(key.split("/" ) ) , _UpperCamelCase )
UpperCAmelCase : Union[str, Any] = '''/'''.join(_UpperCamelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
UpperCAmelCase : Dict = os.path.join(
_UpperCamelCase , weights_name.replace(".bin" , F"-{len(_UpperCamelCase )+1:05d}-of-???.bin" ) )
rename_and_save_block(_UpperCamelCase , _UpperCamelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
UpperCAmelCase : int = {}
UpperCAmelCase : List[str] = 0
UpperCAmelCase : List[Any] = raw_weights.to(getattr(_UpperCamelCase , _UpperCamelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
UpperCAmelCase : str = os.path.join(_UpperCamelCase , weights_name.replace(".bin" , F"-{len(_UpperCamelCase )+1:05d}-of-???.bin" ) )
rename_and_save_block(_UpperCamelCase , _UpperCamelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_UpperCamelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
UpperCAmelCase : List[str] = {}
UpperCAmelCase : List[str] = {}
for idx, shard in enumerate(_UpperCamelCase ):
UpperCAmelCase : Optional[Any] = weights_name.replace(
".bin" , F"-{idx+1:05d}-of-{len(_UpperCamelCase ):05d}.bin" ) # len(sharded_state_dicts):05d}
UpperCAmelCase : int = os.path.join(_UpperCamelCase , weights_name.replace(".bin" , F"-{idx+1:05d}-of-???.bin" ) )
os.rename(_UpperCamelCase , os.path.join(_UpperCamelCase , _UpperCamelCase ) )
UpperCAmelCase : List[Any] = shard
for key in shard:
UpperCAmelCase : Dict = shard_file
# Add the metadata
UpperCAmelCase : Tuple = {'''total_size''': total_size}
UpperCAmelCase : List[str] = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(_UpperCamelCase , _UpperCamelCase ) , "w" , encoding="utf-8" ) as f:
UpperCAmelCase : Optional[int] = json.dumps(_UpperCamelCase , indent=2 , sort_keys=_UpperCamelCase ) + '''\n'''
f.write(_UpperCamelCase )
return metadata, index
if __name__ == "__main__":
a : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
a : str = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def lowercase ( ):
'''simple docstring'''
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
UpperCAmelCase : Dict = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
UpperCAmelCase : int = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
UpperCAmelCase : str = TaTokenizer.from_pretrained("t5-small" )
UpperCAmelCase : int = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
UpperCAmelCase : Optional[Any] = tokenizer(_UpperCamelCase , return_tensors="pt" ).input_ids
UpperCAmelCase : Optional[Any] = model.generate(_UpperCamelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
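# The sharding loop above reduces to one rule: greedily close a shard as soon
# as the next tensor would push it past max_shard_size. A standalone sketch of
# that rule (names illustrative, sizes in bytes, not the script's own helper):
def greedy_shards(sizes, budget):
    shards, current, used = [], [], 0
    for s in sizes:
        if current and used + s > budget:
            # budget exceeded: close the current shard and start a new one
            shards.append(current)
            current, used = [], 0
        current.append(s)
        used += s
    if current:
        shards.append(current)
    return shards

assert greedy_shards([4, 4, 4, 4], 8) == [[4, 4], [4, 4]]
assert greedy_shards([6, 6, 6], 8) == [[6], [6], [6]]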
| 311 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__a = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
warnings.warn(
'''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DeiTImageProcessor instead.''' , SCREAMING_SNAKE_CASE__ , )
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 337 | 0 |
"""simple docstring"""
class TrieNode :
    '''simple docstring'''
    def __init__(self ):
        """simple docstring"""
        self.nodes : dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False
    def insert_many(self , words ):
        """simple docstring"""
        for word in words:
            self.insert(word )
    def insert(self , word ):
        """simple docstring"""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True
    def find(self , word ):
        """simple docstring"""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf
    def delete(self , word ):
        """simple docstring"""
        def _delete(curr , word , index ) -> bool:
            if index == len(word ):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes ) == 0
            char = word[index]
            char_node = curr.nodes.get(char )
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node , word , index + 1 )
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes ) == 0
            return delete_curr
        _delete(self , word , 0 )
def print_words(node , word ) -> None:
    if node.is_leaf:
        print(word , end=""" """ )
    for key, value in node.nodes.items():
        print_words(value , word + key )
def test_trie() -> bool:
    words = '''banana bananas bandana band apple all beast'''.split()
    root = TrieNode()
    root.insert_many(words )
    # print_words(root, "")
    assert all(root.find(word ) for word in words )
    assert root.find("""banana""" )
    assert not root.find("""bandanas""" )
    assert not root.find("""apps""" )
    assert root.find("""apple""" )
    assert root.find("""all""" )
    root.delete("""all""" )
    assert not root.find("""all""" )
    root.delete("""banana""" )
    assert not root.find("""banana""" )
    assert root.find("""bananas""" )
    return True
def print_results(msg , passes ) -> None:
    print(str(msg ) , """works!""" if passes else """doesn't work :(""" )
def pytests() -> None:
    assert test_trie()
def main() -> None:
    print_results("""Testing trie functionality""" , test_trie() )
if __name__ == "__main__":
main()
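# Extra illustration of the bottom-up delete above: removing "banana" while
# "bananas" is stored only clears is_leaf on the final 'a'; nothing is unlinked,
# because every node on that path still has the trailing 's' branch as a child.
t = TrieNode()
t.insert_many(["banana", "bananas"])
t.delete("banana")
assert not t.find("banana") and t.find("bananas")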
| 171 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
__a = logging.get_logger(__name__)
def __lowercase ( _UpperCamelCase ) ->List[int]:
"""simple docstring"""
if isinstance(_UpperCamelCase, np.ndarray ):
return list(tensor.shape )
lowercase : Optional[Any] = tf.shape(_UpperCamelCase )
if tensor.shape == tf.TensorShape(_UpperCamelCase ):
return dynamic
lowercase : Tuple = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(_UpperCamelCase )]
def __lowercase ( _UpperCamelCase, _UpperCamelCase = None, _UpperCamelCase = None ) ->tf.Tensor:
"""simple docstring"""
return tf.nn.softmax(logits=logits + 1e-9, axis=_UpperCamelCase, name=_UpperCamelCase )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=1e-5, _UpperCamelCase=-1 ) ->int:
"""simple docstring"""
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_UpperCamelCase, _UpperCamelCase ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
lowercase , lowercase : Union[str, Any] = tf.nn.moments(_UpperCamelCase, axes=[axis], keepdims=_UpperCamelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
lowercase : int = [1] * inputs.shape.rank
lowercase : Union[str, Any] = shape_list(_UpperCamelCase )[axis]
lowercase : List[str] = tf.reshape(_UpperCamelCase, _UpperCamelCase )
lowercase : Dict = tf.reshape(_UpperCamelCase, _UpperCamelCase )
# Compute layer normalization using the batch_normalization
# function.
lowercase : List[str] = tf.nn.batch_normalization(
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, offset=_UpperCamelCase, scale=_UpperCamelCase, variance_epsilon=_UpperCamelCase, )
return outputs
def __lowercase ( _UpperCamelCase, _UpperCamelCase=0, _UpperCamelCase=-1 ) ->List[Any]:
"""simple docstring"""
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
lowercase : Dict = tf.shape(_UpperCamelCase )
lowercase : Optional[Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
lowercase : List[str] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0 )
return tf.reshape(_UpperCamelCase, _UpperCamelCase )
def __lowercase ( _UpperCamelCase ) ->tf.Tensor:
"""simple docstring"""
if not isinstance(_UpperCamelCase, tf.Tensor ):
lowercase : Optional[Any] = tf.convert_to_tensor(_UpperCamelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
lowercase : Tuple = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
lowercase : List[Any] = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
lowercase : str = (
tf.cast(1, encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = "input_ids" ) ->None:
"""simple docstring"""
tf.debugging.assert_less(
_UpperCamelCase, tf.cast(_UpperCamelCase, dtype=tensor.dtype ), message=(
f"""The maximum value of {tensor_name} ({tf.math.reduce_max(_UpperCamelCase )}) must be smaller than the embedding """
f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
), )
def __lowercase ( group, name, data ) ->Union[str, Any]:
    """simple docstring"""
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x ) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            '''The following attributes cannot be saved to HDF5 file because '''
            f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
            f"""bytes: {bad_attributes}""" )
    data_npy = np.asarray(data )
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks )
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks )
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data ):
            group.attrs['''%s%d''' % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def __lowercase ( group, name ) ->List[str]:
    """simple docstring"""
    if name in group.attrs:
        data = [n.decode('''utf8''' ) if hasattr(n, '''decode''' ) else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode('''utf8''' ) if hasattr(n, '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
            chunk_id += 1
    return data
def __lowercase ( _UpperCamelCase ) ->List[str]:
    """simple docstring"""
    def _expand_single_ad_tensor(t ):
        if isinstance(t, tf.Tensor ) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1 )
        return t
return tf.nest.map_structure(_expand_single_ad_tensor, _UpperCamelCase )
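# A standalone sketch of the shape_list idea that opens this module, since the
# repeated placeholder names above shadow one another (the name is illustrative):
def shape_list_sketch(tensor):
    # Static dims where TensorFlow knows them, dynamic tf.shape() entries where
    # it does not (e.g. an unknown batch dimension).
    dynamic = tf.shape(tensor)
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]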
| 337 | 0 |
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size( features :Features) -> Optional[int]:
    batch_size = np.inf

    def set_batch_size(feature :FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature , Image):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature , Audio):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature , Value) and feature.dtype == "binary":
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features , set_batch_size)
    return None if batch_size is np.inf else batch_size
class __lowercase ( A__ ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = None , **_UpperCAmelCase , ):
super().__init__(
SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ , streaming=SCREAMING_SNAKE_CASE__ , num_proc=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
__a : Any = path_or_paths if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else {self.split: path_or_paths}
__a : List[Any] = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
__a : Optional[int] = Parquet(
cache_dir=SCREAMING_SNAKE_CASE__ , data_files=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , hash=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
def _lowerCamelCase ( self ):
# Build iterable dataset
if self.streaming:
__a : Optional[int] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__a : Union[str, Any] = None
__a : Tuple = None
__a : Dict = None
__a : Any = None
self.builder.download_and_prepare(
download_config=SCREAMING_SNAKE_CASE__ , download_mode=SCREAMING_SNAKE_CASE__ , verification_mode=SCREAMING_SNAKE_CASE__ , base_path=SCREAMING_SNAKE_CASE__ , num_proc=self.num_proc , )
__a : Any = self.builder.as_dataset(
split=self.split , verification_mode=SCREAMING_SNAKE_CASE__ , in_memory=self.keep_in_memory )
return dataset
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__a : str = dataset
__a : Dict = path_or_buf
__a : Optional[Any] = batch_size or get_writer_batch_size(dataset.features )
__a : int = parquet_writer_kwargs
def _lowerCamelCase ( self ):
__a : Any = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , '''wb+''' ) as buffer:
__a : Optional[Any] = self._write(file_obj=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , **self.parquet_writer_kwargs )
else:
__a : Any = self._write(file_obj=self.path_or_buf , batch_size=SCREAMING_SNAKE_CASE__ , **self.parquet_writer_kwargs )
return written
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
__a : Union[str, Any] = 0
__a : Optional[int] = parquet_writer_kwargs.pop('''path_or_buf''' , SCREAMING_SNAKE_CASE__ )
__a : Tuple = self.dataset.features.arrow_schema
__a : List[Any] = pq.ParquetWriter(SCREAMING_SNAKE_CASE__ , schema=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , SCREAMING_SNAKE_CASE__ ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
__a : Dict = query_table(
table=self.dataset._data , key=slice(SCREAMING_SNAKE_CASE__ , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(SCREAMING_SNAKE_CASE__ )
written += batch.nbytes
writer.close()
        return written
 | 160 |
def solution( n = 4000000 ) ->int:
    """simple docstring"""
    even_fibs : list[int] = []
    a , b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a , b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 337 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__A : List[Any] = "hf-internal-testing/tiny-random-bert"
__A : Dict = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
__A : Any = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : Union[str, Any] ) -> int:
lowerCAmelCase_ : Tuple = cached_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(SCREAMING_SNAKE_CASE__ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , """refs""" , """main""" ) ) as f:
lowerCAmelCase_ : int = f.read()
self.assertEqual(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , """snapshots""" , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(os.path.isfile(SCREAMING_SNAKE_CASE__ ) )
# File is cached at the same place the second time.
lowerCAmelCase_ : Tuple = cached_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Using a specific revision to test the full commit hash.
lowerCAmelCase_ : Union[str, Any] = cached_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , revision="""9b8c223""" )
self.assertEqual(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , """snapshots""" , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
def __lowercase ( self : int ) -> str:
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ , """is not a valid model identifier""" ):
lowerCAmelCase_ : Optional[Any] = cached_file("""tiny-random-bert""" , SCREAMING_SNAKE_CASE__ )
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ , """is not a valid git identifier""" ):
lowerCAmelCase_ : Dict = cached_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , revision="""aaaa""" )
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ , """does not appear to have a file named""" ):
lowerCAmelCase_ : List[Any] = cached_file(SCREAMING_SNAKE_CASE__ , """conf""" )
def __lowercase ( self : Optional[Any] ) -> Union[str, Any]:
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ , """does not appear to have a file named""" ):
lowerCAmelCase_ : int = cached_file(SCREAMING_SNAKE_CASE__ , """conf""" )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , """refs""" , """main""" ) ) as f:
lowerCAmelCase_ : Tuple = f.read()
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , """.no_exist""" , SCREAMING_SNAKE_CASE__ , """conf""" ) ) )
lowerCAmelCase_ : Any = cached_file(SCREAMING_SNAKE_CASE__ , """conf""" , _raise_exceptions_for_missing_entries=SCREAMING_SNAKE_CASE__ )
self.assertIsNone(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : List[str] = cached_file(SCREAMING_SNAKE_CASE__ , """conf""" , local_files_only=SCREAMING_SNAKE_CASE__ , _raise_exceptions_for_missing_entries=SCREAMING_SNAKE_CASE__ )
self.assertIsNone(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : Optional[int] = mock.Mock()
lowerCAmelCase_ : str = 5_00
lowerCAmelCase_ : str = {}
lowerCAmelCase_ : Dict = HTTPError
lowerCAmelCase_ : Union[str, Any] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=SCREAMING_SNAKE_CASE__ ) as mock_head:
lowerCAmelCase_ : Optional[int] = cached_file(SCREAMING_SNAKE_CASE__ , """conf""" , _raise_exceptions_for_connection_errors=SCREAMING_SNAKE_CASE__ )
self.assertIsNone(SCREAMING_SNAKE_CASE__ )
# This check we did call the fake head request
mock_head.assert_called()
def __lowercase ( self : List[Any] ) -> Dict:
self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , SCREAMING_SNAKE_CASE__ ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , SCREAMING_SNAKE_CASE__ ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , SCREAMING_SNAKE_CASE__ ) )
def __lowercase ( self : Optional[int] ) -> List[Any]:
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt""" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ , """is not a valid model identifier""" ):
get_file_from_repo("""bert-base-case""" , SCREAMING_SNAKE_CASE__ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ , """is not a valid git identifier""" ):
get_file_from_repo("""bert-base-cased""" , SCREAMING_SNAKE_CASE__ , revision="""ahaha""" )
lowerCAmelCase_ : List[Any] = get_file_from_repo("""bert-base-cased""" , SCREAMING_SNAKE_CASE__ )
# The name is the cached name which is not very easy to test, so instead we load the content.
lowerCAmelCase_ : Union[str, Any] = json.loads(open(SCREAMING_SNAKE_CASE__ , """r""" ).read() )
self.assertEqual(config["""hidden_size"""] , 7_68 )
def __lowercase ( self : Dict ) -> int:
with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(SCREAMING_SNAKE_CASE__ , """a.txt""" ) , str(SCREAMING_SNAKE_CASE__ ) )
self.assertIsNone(get_file_from_repo(SCREAMING_SNAKE_CASE__ , """b.txt""" ) )
| 120 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__a = logging.get_logger(__name__)
__a = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = 'perceiver'
def __init__( self , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=1280 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=26 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="kv" , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=262 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=56 , SCREAMING_SNAKE_CASE__=[368, 496] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=1920 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=[1, 16, 224, 224] , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : Any = num_latents
lowercase : Union[str, Any] = d_latents
lowercase : str = d_model
lowercase : int = num_blocks
lowercase : str = num_self_attends_per_block
lowercase : List[str] = num_self_attention_heads
lowercase : List[str] = num_cross_attention_heads
lowercase : int = qk_channels
lowercase : List[Any] = v_channels
lowercase : int = cross_attention_shape_for_attention
lowercase : Tuple = self_attention_widening_factor
lowercase : Dict = cross_attention_widening_factor
lowercase : Any = hidden_act
lowercase : Optional[Any] = attention_probs_dropout_prob
lowercase : Union[str, Any] = initializer_range
lowercase : Any = layer_norm_eps
lowercase : Any = use_query_residual
# masked language modeling attributes
lowercase : List[str] = vocab_size
lowercase : Dict = max_position_embeddings
# image classification attributes
lowercase : int = image_size
# flow attributes
lowercase : List[Any] = train_size
# multimodal autoencoding attributes
lowercase : List[Any] = num_frames
lowercase : Union[str, Any] = audio_samples_per_frame
lowercase : int = samples_per_patch
lowercase : Optional[int] = output_shape
class __SCREAMING_SNAKE_CASE ( A__ ):
@property
def __lowerCamelCase ( self ):
if self.task == "multiple-choice":
lowercase : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase : Dict = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''inputs''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
@property
def __lowerCamelCase ( self ):
return 1E-4
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 3 , SCREAMING_SNAKE_CASE__ = 40 , SCREAMING_SNAKE_CASE__ = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase : str = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase : Union[str, Any] = preprocessor.num_special_tokens_to_add(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE__ )
# Generate dummy inputs according to compute batch and sequence
lowercase : Optional[Any] = [''' '''.join(['''a'''] ) * seq_length] * batch_size
lowercase : Any = dict(preprocessor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) )
lowercase : Union[str, Any] = inputs.pop('''input_ids''' )
return inputs
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase : List[str] = compute_effective_axis_dimension(SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch )
lowercase : List[str] = self._generate_dummy_images(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = dict(preprocessor(images=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) )
lowercase : Union[str, Any] = inputs.pop('''pixel_values''' )
return inputs
else:
raise ValueError(
'''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
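# The dummy-input sizing above follows one rule: a dynamic axis (given as -1)
# is swapped for a small fixed default so ONNX tracing cannot specialize on it.
# Minimal sketch of that rule (illustrative, not the transformers helper itself,
# which also accounts for special tokens to add):
def effective_dim(dim, fixed_default):
    return fixed_default if dim <= 0 else dim

assert effective_dim(-1, 2) == 2   # dynamic batch axis -> fixed default
assert effective_dim(8, 2) == 8    # static axis passes through unchanged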
| 337 | 0 |
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel( pl.LightningModule ):
    def __init__( self , model ):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )
def lowerCamelCase__ ( self : List[Any] ):
pass
def convert_longformer_qa_checkpoint_to_pytorch( longformer_model: str , longformer_question_answering_ckpt_path: str , pytorch_dump_folder_path: str ) -> None:
    '''simple docstring'''
    longformer = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(longformer )
    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device("cpu" ) )
    lightning_model.load_state_dict(ckpt["state_dict"] )
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
    print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__A = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
 | 135 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator( length = 8 ) ->str:
    """simple docstring"""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars ) for _ in range(length ) )
def alternative_password_generator( chars_incl, i ) ->str:
    """simple docstring"""
    i -= len(chars_incl )
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder )
        + random(digits, quotient )
        + random(punctuation, quotient )
    )
    list_of_chars = list(chars )
    shuffle(list_of_chars )
    return "".join(list_of_chars )
# random is a generalised function for letters, characters and numbers
def random( chars_incl, i ) ->str:
    """simple docstring"""
    return "".join(secrets.choice(chars_incl ) for _ in range(i ) )
def random_number( chars_incl, i ):
    """simple docstring"""
    pass  # Put your code here...
def random_letters( chars_incl, i ):
    """simple docstring"""
    pass  # Put your code here...
def random_characters( chars_incl, i ):
    """simple docstring"""
    pass  # Put your code here...
def is_strong_password( password, min_length = 8 ) ->bool:
    """simple docstring"""
    if len(password ) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password )
    lower = any(char in ascii_lowercase for char in password )
    num = any(char in digits for char in password )
    spec_char = any(char in punctuation for char in password )
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def main( ) ->None:
    """simple docstring"""
    max_length = int(input('''Please indicate the max length of your password: ''' ).strip() )
    chars_incl = input(
        '''Please indicate the characters that must be in your password: ''' ).strip()
    print('''Password generated:''', password_generator(max_length ) )
    print(
        '''Alternative Password generated:''', alternative_password_generator(chars_incl, max_length ), )
    print('''[If you are thinking of using this password, you had better save it.]''' )
if __name__ == "__main__":
main()
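# Possible fill-ins for the "Put your code here..." helpers above, shown after
# the entry point purely for illustration -- the source deliberately leaves
# them as exercises, so treat these bodies as one reasonable assumption:
def random_number(charset, i):
    # e.g. random_number(digits, 4) -> four random digit characters
    return "".join(secrets.choice(charset) for _ in range(i))
def random_letters(charset, i):
    # e.g. random_letters(ascii_letters, 4) -> four random letters
    return "".join(secrets.choice(charset) for _ in range(i))
def random_characters(charset, i):
    # e.g. random_characters(punctuation, 4) -> four random punctuation marks
    return "".join(secrets.choice(charset) for _ in range(i))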
| 337 | 0 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase ( A__):
_lowerCAmelCase : Dict = (DDIMParallelScheduler,)
_lowerCAmelCase : Union[str, Any] = (('eta', 0.0), ('num_inference_steps', 5_0))
def _snake_case ( self : Optional[int] , **lowercase_ : List[Any] ):
snake_case_ : Any = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**SCREAMING_SNAKE_CASE__ )
return config
def _snake_case ( self : Union[str, Any] , **lowercase_ : Tuple ):
snake_case_ : Optional[Any] = self.scheduler_classes[0]
snake_case_ : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[int] = 10, 0.0
snake_case_ : Dict = self.dummy_model()
snake_case_ : int = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for t in scheduler.timesteps:
snake_case_ : str = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : Union[str, Any] = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
return sample
def _snake_case ( self : List[Any] ):
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
def _snake_case ( self : str ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE__ )
snake_case_ : Union[str, Any] = self.scheduler_classes[0]
snake_case_ : Dict = self.get_scheduler_config(steps_offset=1 )
snake_case_ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def _snake_case ( self : Tuple ):
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE__ , beta_end=SCREAMING_SNAKE_CASE__ )
def _snake_case ( self : int ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE__ )
def _snake_case ( self : List[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ )
def _snake_case ( self : Optional[int] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__ )
def _snake_case ( self : Tuple ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=SCREAMING_SNAKE_CASE__ )
def _snake_case ( self : Any ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=SCREAMING_SNAKE_CASE__ )
def _snake_case ( self : Optional[int] ):
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , sample_max_value=SCREAMING_SNAKE_CASE__ , )
def _snake_case ( self : Union[str, Any] ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ )
def _snake_case ( self : Tuple ):
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , num_inference_steps=SCREAMING_SNAKE_CASE__ )
def _snake_case ( self : Tuple ):
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , eta=SCREAMING_SNAKE_CASE__ )
def _snake_case ( self : Any ):
snake_case_ : Optional[Any] = self.scheduler_classes[0]
snake_case_ : str = self.get_scheduler_config()
snake_case_ : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_47_71 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_24_60 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_09_79 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
def _snake_case ( self : Union[str, Any] ):
snake_case_ : Any = self.scheduler_classes[0]
snake_case_ : Union[str, Any] = self.get_scheduler_config()
snake_case_ : str = scheduler_class(**SCREAMING_SNAKE_CASE__ )
snake_case_ : Dict = 10, 0.0
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
snake_case_ : Union[str, Any] = self.dummy_model()
snake_case_ : Union[str, Any] = self.dummy_sample_deter
snake_case_ : Tuple = self.dummy_sample_deter + 0.1
snake_case_ : Tuple = self.dummy_sample_deter - 0.1
snake_case_ : Dict = samplea.shape[0]
snake_case_ : List[str] = torch.stack([samplea, samplea, samplea] , dim=0 )
snake_case_ : str = torch.arange(SCREAMING_SNAKE_CASE__ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[int] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
snake_case_ : Any = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , SCREAMING_SNAKE_CASE__ )
snake_case_ : List[str] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
snake_case_ : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1E-2
assert abs(result_mean.item() - 0.49_82 ) < 1E-3
def _snake_case ( self : Tuple ):
snake_case_ : int = self.full_loop()
snake_case_ : int = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
snake_case_ : str = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1E-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1E-3
def _snake_case ( self : Tuple ):
snake_case_ : List[str] = self.full_loop(prediction_type='''v_prediction''' )
snake_case_ : int = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
snake_case_ : str = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 52.53_02 ) < 1E-2
assert abs(result_mean.item() - 0.06_84 ) < 1E-3
def _snake_case ( self : Union[str, Any] ):
# We specify different beta, so that the first alpha is 0.99
snake_case_ : Tuple = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 )
snake_case_ : List[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
snake_case_ : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1E-2
assert abs(result_mean.item() - 0.19_51 ) < 1E-3
def _snake_case ( self : Optional[Any] ):
# We specify different beta, so that the first alpha is 0.99
snake_case_ : Optional[Any] = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 )
snake_case_ : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
snake_case_ : List[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1E-2
assert abs(result_mean.item() - 0.19_41 ) < 1E-3
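# The batch_step_no_noise checks above hinge on one shape trick: a stack of
# (num_variants, batch, ...) samples is flattened to (num_variants * batch, ...)
# so a single model call can score several candidate trajectories at once.
# Minimal shape sketch (sizes illustrative):
stacked = torch.zeros(3, 4, 8, 8)                 # 3 variants of a 4-sample batch
steps = torch.arange(10)[0:3, None].repeat(1, 4)  # one timestep row per variant
assert stacked.flatten(0, 1).shape[0] == steps.flatten(0, 1).shape[0] == 12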
| 264 |
from __future__ import annotations
solution = []
def is_safe( board, row, column ) ->bool:
    """simple docstring"""
    for i in range(len(board ) ):
        if board[row][i] == 1:
            return False
    for i in range(len(board ) ):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1 ), range(column, -1, -1 ) ):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1 ), range(column, len(board ) ) ):
        if board[i][j] == 1:
            return False
    return True
def solve( board, row ) ->bool:
    """simple docstring"""
    if row >= len(board ):
        solution.append(board )
        printboard(board )
        print()
        return True
    for i in range(len(board ) ):
        if is_safe(board, row, i ):
            board[row][i] = 1
            solve(board, row + 1 )
            board[row][i] = 0
    return False
def printboard( board ) ->None:
    """simple docstring"""
    for i in range(len(board ) ):
        for j in range(len(board ) ):
            if board[i][j] == 1:
                print('''Q''', end=''' ''' )
            else:
                print('''.''', end=''' ''' )
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
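# Note on is_safe above: only the row, the column, and the two *upper*
# diagonals are scanned, because solve() fills the board top-down, leaving the
# rows below `row` empty. A set-based alternative answers the same question in
# O(1) per query (illustrative helper, not part of the original):
def is_safe_fast(cols, diag1, diag2, row, column):
    return (
        column not in cols
        and (row - column) not in diag1
        and (row + column) not in diag2
    )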
| 337 | 0 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCamelCase : int = logging.get_logger(__name__)
class UpperCamelCase_ ( A__ ):
'''simple docstring'''
UpperCAmelCase__ = ['pixel_values']
def __init__( self : str , UpperCAmelCase__ : Tuple = True , UpperCAmelCase__ : Dict = None , UpperCAmelCase__ : Optional[Any] = PILImageResampling.BICUBIC , UpperCAmelCase__ : Optional[Any] = True , UpperCAmelCase__ : str = None , UpperCAmelCase__ : str = True , UpperCAmelCase__ : Optional[Any] = 1 / 255 , UpperCAmelCase__ : int = True , UpperCAmelCase__ : Optional[int] = IMAGENET_DEFAULT_MEAN , UpperCAmelCase__ : Any = IMAGENET_DEFAULT_STD , **UpperCAmelCase__ : Union[str, Any] , ) ->str:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE__)
A__ = size if size is not None else {'''shortest_edge''': 224}
A__ = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__)
A__ = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
A__ = get_size_dict(SCREAMING_SNAKE_CASE__ , param_name='''crop_size''')
A__ = do_resize
A__ = size
A__ = resample
A__ = do_center_crop
A__ = crop_size
A__ = do_rescale
A__ = rescale_factor
A__ = do_normalize
A__ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
A__ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : int = PILImageResampling.BICUBIC , UpperCAmelCase__ : Any = None , **UpperCAmelCase__ : int , ) ->str:
'''simple docstring'''
A__ = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__)
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
A__ = int((256 / 224) * size['''shortest_edge'''])
A__ = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__)
A__ = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""")
return resize(
SCREAMING_SNAKE_CASE__ , size=(size_dict['''height'''], size_dict['''width''']) , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] = None , **UpperCAmelCase__ : Tuple , ) ->int:
'''simple docstring'''
A__ = get_size_dict(SCREAMING_SNAKE_CASE__)
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""")
return center_crop(SCREAMING_SNAKE_CASE__ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__)
def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any = None , **UpperCAmelCase__ : List[Any] , ) ->List[str]:
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__)
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple = None , **UpperCAmelCase__ : Tuple , ) ->Optional[Any]:
'''simple docstring'''
return normalize(SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__)
def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] = None , UpperCAmelCase__ : Union[str, Any] = None , UpperCAmelCase__ : List[Any] = None , UpperCAmelCase__ : List[str] = None , UpperCAmelCase__ : List[str] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Tuple = None , UpperCAmelCase__ : Tuple = None , UpperCAmelCase__ : Union[str, Any] = None , UpperCAmelCase__ : str = None , UpperCAmelCase__ : Union[str, Any] = None , UpperCAmelCase__ : Any = ChannelDimension.FIRST , **UpperCAmelCase__ : Any , ) ->str:
'''simple docstring'''
A__ = do_resize if do_resize is not None else self.do_resize
A__ = resample if resample is not None else self.resample
A__ = do_center_crop if do_center_crop is not None else self.do_center_crop
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = size if size is not None else self.size
A__ = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__)
A__ = crop_size if crop_size is not None else self.crop_size
A__ = get_size_dict(SCREAMING_SNAKE_CASE__ , param_name='''crop_size''')
A__ = make_list_of_images(SCREAMING_SNAKE_CASE__)
if not valid_images(SCREAMING_SNAKE_CASE__):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''')
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
# All transformations expect numpy arrays.
A__ = [to_numpy_array(SCREAMING_SNAKE_CASE__) for image in images]
if do_resize:
A__ = [self.resize(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) for image in images]
if do_center_crop:
A__ = [self.center_crop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) for image in images]
if do_rescale:
A__ = [self.rescale(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) for image in images]
if do_normalize:
A__ = [self.normalize(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) for image in images]
A__ = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) for image in images]
A__ = {'''pixel_values''': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__)
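
# Usage sketch for the pipeline above. The (256 / 224) * shortest_edge rule is the LeViT
# convention, so a `LevitImageProcessor` is assumed here; instantiating it with default
# settings keeps the example offline-friendly. Shapes shown are for the default crop size.
#   import numpy as np
#   from transformers import LevitImageProcessor
#
#   processor = LevitImageProcessor()  # default resize/crop/rescale/normalize settings
#   dummy = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)  # H x W x C image
#   batch = processor(dummy, return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224)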
| 14 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
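# Note: `_LazyModule` defers the heavy torch/TF imports until an attribute is first
# accessed, e.g. `from transformers.models.ctrl import CTRLModel` only imports the
# `modeling_ctrl` submodule at that point.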
| 337 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'ctrl': 256,
}
CONTROL_CODES = {
'Pregnancy': 168_629,
'Christianity': 7_675,
'Explain': 106_423,
'Fitness': 63_440,
'Saving': 63_163,
'Ask': 27_171,
'Ass': 95_985,
'Joke': 163_509,
'Questions': 45_622,
'Thoughts': 49_605,
'Retail': 52_342,
'Feminism': 164_338,
'Writing': 11_992,
'Atheism': 192_263,
'Netflix': 48_616,
'Computing': 39_639,
'Opinion': 43_213,
'Alone': 44_967,
'Funny': 58_917,
'Gaming': 40_358,
'Human': 4_088,
'India': 1_331,
'Joker': 77_138,
'Diet': 36_206,
'Legal': 11_859,
'Norman': 4_939,
'Tip': 72_689,
'Weight': 52_343,
'Movies': 46_273,
'Running': 23_425,
'Science': 2_090,
'Horror': 37_793,
'Confession': 60_572,
'Finance': 12_250,
'Politics': 16_360,
'Scary': 191_985,
'Support': 12_654,
'Technologies': 32_516,
'Teenage': 66_160,
'Event': 32_769,
'Learned': 67_460,
'Notion': 182_770,
'Wikipedia': 37_583,
'Books': 6_665,
'Extract': 76_050,
'Confessions': 102_701,
'Conspiracy': 75_932,
'Links': 63_674,
'Narcissus': 150_425,
'Relationship': 54_766,
'Relationships': 134_796,
'Reviews': 41_671,
'News': 4_256,
'Translation': 26_820,
'multilingual': 128_406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    """Constructs a CTRL tokenizer based on byte-pair encoding (BPE)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        """Apply byte-pair merges to a single whitespace token; continuation pieces are marked with '@@'."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string into BPE sub-word tokens."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens back into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
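
# Usage sketch (fetching the checkpoint needs network access; exact pieces are assumptions):
#   tok = CTRLTokenizer.from_pretrained("ctrl")
#   tok.tokenize("Diet Hello world")  # a control code followed by BPE pieces marked with "@@"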
| 263 |
from collections.abc import Callable


class Heap:
    """A generic heap with a position map, supporting item updates and deletions.

    With the default key, larger values rise to the top (max-heap behaviour)."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key: Callable = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Swaps two elements, keeping the position map consistent."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns the index that should be the parent among i and its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction from the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction from the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates the value of the given item in the heap, if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes the given item from the heap, if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        """Returns the top item tuple [item, score] if the heap is non-empty."""
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        """Returns and removes the top item tuple [item, score], if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    """Hook for doctest-based checks of the Heap class."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
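
# Usage sketch (default ordering puts the largest score on top; pass key=lambda x: -x
# for min-heap behaviour):
#   h = Heap()
#   for item, value in [(3, 30), (1, 10), (2, 20)]:
#       h.insert_item(item, value)
#   h.get_top()        # [3, 30] -- largest value on top
#   h.update_item(3, 5)  # decrease-key: item 3 re-scored, heap reorders
#   h.extract_top()    # [2, 20]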
| 337 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
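
# Usage sketch (offline: a config-built model is randomly initialised):
#   from transformers import Swinv2Model
#   config = Swinv2Config(image_size=256, window_size=8)
#   model = Swinv2Model(config)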
| 226 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 337 | 0 |
"""Convert a SpeechT5 HiFi-GAN vocoder checkpoint to the Hugging Face format."""
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
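
    # Example invocation (sketch; file names are placeholders):
    #   python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
    #       --pytorch_dump_folder_path ./speecht5_hifigan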
| 223 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
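
# Usage sketch (checkpoint download requires network; outputs are illustrative only):
#   tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   tok(" Hello world")["input_ids"]  # ends with the EOS id added by
#                                     # build_inputs_with_special_tokens above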
| 337 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 141 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
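
# Example invocation (sketch; the flags come from TensorFlowBenchmarkArguments):
#   python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128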
| 337 | 0 |
'''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    """Extracts text nodes and their xpaths from raw HTML strings (requires `beautifulsoup4`)."""

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False

        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
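
# Usage sketch (requires `beautifulsoup4`; the xpath strings shown are what the code
# above should produce for this input):
#   fe = MarkupLMFeatureExtractor()
#   enc = fe("<html><body><h1>Title</h1><p>Hello</p></body></html>")
#   enc["nodes"]   # [['Title', 'Hello']]
#   enc["xpaths"]  # [['/html/body/h1', '/html/body/p']]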
| 311 |
def bfs(graph, s, t, parent):
    """Breadth-first search for an augmenting path from s to t in the residual graph."""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink):
    """Edmonds-Karp style max-flow: repeatedly augment along BFS paths."""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
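
# For this classic 6-node example network the maximum flow from node 0 to node 5 is 23,
# so the script prints 23.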
| 337 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class lowerCamelCase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 171 |
from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """
    Metaclass that collects the marked key handlers and attaches `handle_input`.
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists in the handler."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
            handler = cls.key_handler.get(char)
            if handler:
                cls.current_selection = char
                return handler(cls)
            else:
                return None


def register(cls):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
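
# Usage sketch: a class created through `register` gains `handle_input`, which reads
# one key via `get_character()` and dispatches to the method marked with that key.
#   class Menu:
#       @mark("j")
#       def move_down(cls):
#           ...
#   Menu = register(Menu)
#   Menu.handle_input()  # pressing "j" calls move_down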
| 337 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_latent_upscaler(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
| 160 |
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """A logging adapter for multiprocess runs: messages go to the main process only by default."""

    @staticmethod
    def _should_log(main_process_only):
        "Check if log should be performed"
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    """Returns a `MultiProcessAdapter`-wrapped logger for `name`."""
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
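
# Usage sketch (inside an `accelerate launch` run):
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed once, on the main process")
#   logger.info("printed on every process", main_process_only=False)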
| 337 | 0 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger("transformers-cli/serving")
def serve_command_factory(args: Namespace):
    """Factory function used to instantiate the serving server from provided command line arguments."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)


class ServeModelInfoResult(BaseModel):
    """Expose model information."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any
"""simple docstring"""
@staticmethod
def __lowercase ( lowerCamelCase : Optional[int] ) -> List[str]:
lowerCAmelCase_ : Tuple = parser.add_parser(
"""serve""" , help="""CLI tool to run inference requests through REST and GraphQL endpoints.""" )
serve_parser.add_argument(
"""--task""" , type=SCREAMING_SNAKE_CASE__ , choices=get_supported_tasks() , help="""The task to run the pipeline on""" , )
serve_parser.add_argument("""--host""" , type=SCREAMING_SNAKE_CASE__ , default="""localhost""" , help="""Interface the server will listen on.""" )
serve_parser.add_argument("""--port""" , type=SCREAMING_SNAKE_CASE__ , default=88_88 , help="""Port the serving will listen to.""" )
serve_parser.add_argument("""--workers""" , type=SCREAMING_SNAKE_CASE__ , default=1 , help="""Number of http workers""" )
serve_parser.add_argument("""--model""" , type=SCREAMING_SNAKE_CASE__ , help="""Model\'s name or path to stored model.""" )
serve_parser.add_argument("""--config""" , type=SCREAMING_SNAKE_CASE__ , help="""Model\'s config name or path to stored model.""" )
serve_parser.add_argument("""--tokenizer""" , type=SCREAMING_SNAKE_CASE__ , help="""Tokenizer name to use.""" )
serve_parser.add_argument(
"""--device""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
serve_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
def __init__( self : Dict , lowerCamelCase : int , lowerCamelCase : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : str ) -> Tuple:
lowerCAmelCase_ : Any = pipeline
lowerCAmelCase_ : List[Any] = host
lowerCAmelCase_ : str = port
lowerCAmelCase_ : List[str] = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"""Using serve command requires FastAPI and uvicorn. """
"""Please install transformers with [serving]: pip install \"transformers[serving]\"."""
"""Or install FastAPI and uvicorn separately.""" )
else:
logger.info(F'Serving model over {host}:{port}' )
lowerCAmelCase_ : List[str] = FastAPI(
routes=[
APIRoute(
"""/""" , self.model_info , response_model=SCREAMING_SNAKE_CASE__ , response_class=SCREAMING_SNAKE_CASE__ , methods=["""GET"""] , ),
APIRoute(
"""/tokenize""" , self.tokenize , response_model=SCREAMING_SNAKE_CASE__ , response_class=SCREAMING_SNAKE_CASE__ , methods=["""POST"""] , ),
APIRoute(
"""/detokenize""" , self.detokenize , response_model=SCREAMING_SNAKE_CASE__ , response_class=SCREAMING_SNAKE_CASE__ , methods=["""POST"""] , ),
APIRoute(
"""/forward""" , self.forward , response_model=SCREAMING_SNAKE_CASE__ , response_class=SCREAMING_SNAKE_CASE__ , methods=["""POST"""] , ),
] , timeout=6_00 , )
def __lowercase ( self : Tuple ) -> Optional[int]:
run(self._app , host=self.host , port=self.port , workers=self.workers )
def __lowercase ( self : int ) -> Any:
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def __lowercase ( self : Union[str, Any] , lowerCamelCase : Dict = Body(SCREAMING_SNAKE_CASE__ , embed=SCREAMING_SNAKE_CASE__ ) , lowerCamelCase : Dict = Body(SCREAMING_SNAKE_CASE__ , embed=SCREAMING_SNAKE_CASE__ ) ) -> Optional[int]:
try:
lowerCAmelCase_ : Optional[int] = self._pipeline.tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
if return_ids:
lowerCAmelCase_ : Tuple = self._pipeline.tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
return ServeTokenizeResult(tokens=SCREAMING_SNAKE_CASE__ , tokens_ids=SCREAMING_SNAKE_CASE__ )
else:
return ServeTokenizeResult(tokens=SCREAMING_SNAKE_CASE__ )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={"""model""": """""", """error""": str(SCREAMING_SNAKE_CASE__ )} )
def __lowercase ( self : str , lowerCamelCase : Dict = Body(SCREAMING_SNAKE_CASE__ , embed=SCREAMING_SNAKE_CASE__ ) , lowerCamelCase : Any = Body(SCREAMING_SNAKE_CASE__ , embed=SCREAMING_SNAKE_CASE__ ) , lowerCamelCase : Union[str, Any] = Body(SCREAMING_SNAKE_CASE__ , embed=SCREAMING_SNAKE_CASE__ ) , ) -> str:
try:
lowerCAmelCase_ : Optional[Any] = self._pipeline.tokenizer.decode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return ServeDeTokenizeResult(model="""""" , text=SCREAMING_SNAKE_CASE__ )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={"""model""": """""", """error""": str(SCREAMING_SNAKE_CASE__ )} )
async def __lowercase ( self : Optional[Any] , lowerCamelCase : int=Body(SCREAMING_SNAKE_CASE__ , embed=SCREAMING_SNAKE_CASE__ ) ) -> Tuple:
# Check we don't have empty string
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
lowerCAmelCase_ : Optional[Any] = self._pipeline(SCREAMING_SNAKE_CASE__ )
return ServeForwardResult(output=SCREAMING_SNAKE_CASE__ )
except Exception as e:
raise HTTPException(5_00 , {"""error""": str(SCREAMING_SNAKE_CASE__ )} )
| 120 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # only the attributes above are needed for the state-dict transfer, so forward is a stub
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 337 | 0 |
"""simple docstring"""
from functools import lru_cache
def lowercase_ ( _lowerCamelCase: List[str] ) -> set:
'''simple docstring'''
__lowerCamelCase : Optional[int] = 2
__lowerCamelCase : Optional[Any] = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(_UpperCamelCase )
if n > 1:
factors.add(_UpperCamelCase )
return factors
@lru_cache
def lowercase_ ( _lowerCamelCase: List[str] ) -> int:
'''simple docstring'''
return len(unique_prime_factors(_UpperCamelCase ) )
def lowercase_ ( _lowerCamelCase: Any ) -> bool:
'''simple docstring'''
return len(set(_UpperCamelCase ) ) in (0, 1)
def lowercase_ ( _lowerCamelCase: List[Any] ) -> list:
'''simple docstring'''
__lowerCamelCase : List[Any] = 2
while True:
# Increment each value of a generated range
__lowerCamelCase : Dict = [base + i for i in range(_UpperCamelCase )]
# Run elements through out unique_prime_factors function
# Append our target number to the end.
__lowerCamelCase : str = [upf_len(_UpperCamelCase ) for x in group]
checker.append(_UpperCamelCase )
# If all numbers in the list are equal, return the group variable.
if equality(_UpperCamelCase ):
return group
# Increment our base variable by 1
base += 1
def lowercase_ ( _lowerCamelCase: Tuple = 4 ) -> int:
'''simple docstring'''
__lowerCamelCase : Dict = run(_UpperCamelCase )
return results[0] if len(_UpperCamelCase ) else None
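# Worked example: the first pair of consecutive integers with two distinct
# prime factors each is (14, 15), since 14 = 2 * 7 and 15 = 3 * 5, so
# run(2) == [14, 15] and solution(2) == 14.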
if __name__ == "__main__":
print(solution()) | 135 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
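# Minimal usage sketch (assumes a transformers build that ships YOLOS):
#
#     from transformers import YolosConfig, YolosModel
#     config = YolosConfig(num_detection_tokens=50)  # override a default
#     model = YolosModel(config)                     # randomly initialised weights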
| 337 | 0 |
"""simple docstring"""
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list, sample: list) -> int:
        """Compute the winning vector (0 or 1) by Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        # the closer (smaller distance) weight vector wins
        return 0 if d0 < d1 else 1

    def update(self, weights: list, sample: list, j: int, alpha: float) -> list:
        """Pull the winning vector j towards the sample with learning rate alpha."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # training samples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
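# Worked distance check for the untrained weights and the test sample
# [0, 0, 0, 1]: d0 = 0.2^2 + 0.6^2 + 0.5^2 + (0.9 - 1)^2 = 0.66 and
# d1 = 0.8^2 + 0.4^2 + 0.7^2 + (0.3 - 1)^2 = 1.78, so before any training the
# sample would fall into cluster 0, the one with the smaller distance.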
| 264 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Perform a runtime check of a dependency version, using the same syntax as pip."""
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
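# Example calls (package pins shown for illustration only):
#
#     require_version("tokenizers==0.9.4")    # exact pin
#     require_version("numpy>=1.17,<2.0")     # comma-separated range, both clauses checked
#     require_version("python>=3.8")          # special-cased check against the running interpreter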
| 337 | 0 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
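# Why the expected split looks like this: "adapt" and "apt" are whole vocab
# entries, while "react" has no merge rule of its own, so BPE falls back to
# "re@@ a@@ c@@ t"; a trailing "@@" marks a subword that continues the word.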
| 14 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
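# Quick usage sketch (assumes a transformers build that ships DETA):
#
#     from transformers import DetaConfig, DetaModel
#     config = DetaConfig(two_stage=False, with_box_refine=False)
#     model = DetaModel(config)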
| 337 | 0 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBirdForQuestionAnswering with a CLS head on top for predicting the answer category.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule


def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels):
    def cross_entropy(logits, labels, reduction=None):
        num_classes = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(num_classes)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooled_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128  # this field name and the next are reconstructed guesses
    num_workers: int = 3
    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()


@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)


@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)


@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator


def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
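# Rough wiring sketch (names as defined above; the dataset loading, tokenizer
# and wandb setup from the original training script are assumed elsewhere):
#
#     model = FlaxBigBirdForNaturalQuestions.from_pretrained(args.model_id)
#     tx, lr = build_tx(args.lr, args.init_lr, args.warmup_steps, num_train_steps, args.weight_decay)
#     trainer = Trainer(
#         args=args,
#         data_collator=DataCollator(pad_id=tokenizer.pad_token_id, max_length=4096),
#         train_step_fn=train_step,
#         val_step_fn=val_step,
#         model_save_fn=model.save_pretrained,
#         logger=wandb,
#         scheduler_fn=lr,
#     )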
| 263 |
def solution() -> str:
    """Return the last ten digits of the series 1^1 + 2^2 + 3^3 + ... + 1000^1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
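# Sanity check of the idea on a tiny prefix: 1**1 + 2**2 + 3**3 = 32, and
# str(32)[-10:] == "32" — the slice simply keeps at most the last ten digits.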
if __name__ == "__main__":
print(solution())
| 337 | 0 |
import math
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
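# Worked example for n = 10: the square of the sum is 55**2 = 3025 and the sum
# of the squares is 385, so solution(10) == 2640.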
if __name__ == "__main__":
print(f'''{solution() = }''')
| 226 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        # the exact black target version is a reconstruction; any modern target works here
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("DDPM", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 337 | 0 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2_521,
452,
358,
1_357,
387,
7_751,
3_536,
112,
985,
456,
126,
865,
938,
5_400,
5_734,
458,
1_368,
467,
786,
2_462,
5_246,
1_159,
633,
865,
4_519,
457,
582,
852,
2_557,
427,
916,
508,
405,
34_324,
497,
391,
408,
11_342,
1_244,
385,
100,
938,
985,
456,
574,
362,
12_597,
3_200,
3_129,
1_172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase_ :Optional[int] = {'''input_ids''': [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase_,  # the expected-encoding dict built on the line above
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
| 223 |
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
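    # Expected shortest distances for this example graph: 1 -> 4 costs 11
    # (1 -> 3 -> 4 = 5 + 6) and 0 -> 3 costs 16 (0 -> 2 -> 3 = 9 + 7).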
| 337 | 0 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)


class _PatchedModuleObj:
    """Set all the modules components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """
    Patch a submodule attribute of an object, keeping all other submodules intact at all levels.
    """

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None

        return self.__exit__()
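# Typical usage sketch (module and replacement names are illustrative):
#
#     with patch_submodule(some_module, "os.path.join", mock_join):
#         some_module.function_that_calls_join()
#
# or as a persistent patch:
#
#     patch = patch_submodule(some_module, "open", mock_open)
#     patch.start()
#     ...
#     patch.stop()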
| 141 |
from __future__ import annotations
def mean(nums: list) -> float:
    """
    Find the mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 0 |
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    """
    BenchMarkArguments are arguments we use in our benchmark scripts **which relate to the training loop itself**.
    """
SCREAMING_SNAKE_CASE__ : List[str] = list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
SCREAMING_SNAKE_CASE__ : List[int] = list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
SCREAMING_SNAKE_CASE__ : List[int] = list_field(
default=[8, 32, 1_28, 5_12] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
SCREAMING_SNAKE_CASE__ : bool = field(
default=A__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
SCREAMING_SNAKE_CASE__ : bool = field(
default=A__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
SCREAMING_SNAKE_CASE__ : bool = field(
default=A__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
SCREAMING_SNAKE_CASE__ : bool = field(default=A__ , metadata={"help": "Use FP16 to accelerate inference."} )
SCREAMING_SNAKE_CASE__ : bool = field(default=A__ , metadata={"help": "Benchmark training of model"} )
SCREAMING_SNAKE_CASE__ : bool = field(default=A__ , metadata={"help": "Verbose memory tracing"} )
SCREAMING_SNAKE_CASE__ : bool = field(
default=A__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
SCREAMING_SNAKE_CASE__ : bool = field(
default=A__ , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
SCREAMING_SNAKE_CASE__ : bool = field(default=A__ , metadata={"help": "Trace memory line by line"} )
SCREAMING_SNAKE_CASE__ : bool = field(default=A__ , metadata={"help": "Save result to a CSV file"} )
SCREAMING_SNAKE_CASE__ : bool = field(default=A__ , metadata={"help": "Save all print statements in a log file"} )
SCREAMING_SNAKE_CASE__ : bool = field(default=A__ , metadata={"help": "Whether to print environment information"} )
SCREAMING_SNAKE_CASE__ : bool = field(
default=A__ , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
SCREAMING_SNAKE_CASE__ : str = field(
default=F'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , )
SCREAMING_SNAKE_CASE__ : str = field(
default=F'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , )
SCREAMING_SNAKE_CASE__ : str = field(
default=F'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
SCREAMING_SNAKE_CASE__ : str = field(
default=F'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
SCREAMING_SNAKE_CASE__ : str = field(
default=F'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , )
SCREAMING_SNAKE_CASE__ : str = field(
default=F'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , )
SCREAMING_SNAKE_CASE__ : int = field(default=3 , metadata={"help": "Times an experiment will be run."} )
SCREAMING_SNAKE_CASE__ : bool = field(
default=A__ , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
| 311 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__a = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 337 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for Stable Diffusion pipelines.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 171 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Deal with dynamic shape in tensorflow cleanly."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    """Returns the same output as `tf.nn.softmax`, but avoids NaN issues by adding a tiny offset."""
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    """Invert an attention mask (e.g., switches 0. and 1.)."""
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = "input_ids" ) ->None:
"""simple docstring"""
tf.debugging.assert_less(
_UpperCamelCase, tf.cast(_UpperCamelCase, dtype=tensor.dtype ), message=(
f"""The maximum value of {tensor_name} ({tf.math.reduce_max(_UpperCamelCase )}) must be smaller than the embedding """
f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
), )
def save_attributes_to_hdf5_group(group, name, data):
    """Saves the attributes (`data`) under `name` in the HDF5 group, chunking if needed."""
    # HDF5 attribute headers are limited in size; keep some margin below 64 kB.
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
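# Minimal usage sketch (my addition; assumes the optional h5py dependency,
# and the file name is illustrative):
# import h5py
# with h5py.File("weights.h5", "w") as f:
#     save_attributes_to_hdf5_group(f, "layer_names", [b"dense", b"conv"])
#     assert load_attributes_from_hdf5_group(f, "layer_names") == ["dense", "conv"]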
def load_attributes_from_hdf5_group(group, name):
    """Loads the attributes saved under `name` from the HDF5 group, reassembling chunks."""
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
def expand_1d(data):
    """Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
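# Quick sketch of the helper above (my addition): a rank-1 label vector becomes
# shape (n, 1), while higher-rank tensors pass through unchanged.
# labels = tf.constant([0.0, 1.0, 1.0])
# assert expand_1d(labels).shape == (3, 1)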
| 337 | 0 |
"""simple docstring"""
def method_1(boundary, steps):
    """Trapezoidal rule: approximates the integral of f over `boundary` using `steps` strips."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")
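# Note (my observation, not from the original file): the exact integral of x^2
# on [0, 1] is 1/3. Because make_points stops while x < (b - h), and float
# rounding pushes 0.8 + 0.1 slightly above 0.9, the last interior point is
# skipped, so main() prints an estimate somewhat below the usual trapezoidal
# value for this step size.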
if __name__ == "__main__":
main() | 160 |
def solution(n: int = 4000000) -> int:
    """Returns the sum of the even-valued Fibonacci terms not exceeding `n` (Project Euler 2)."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F'''{solution() = }''')
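# With the default limit of 4,000,000 this prints `solution() = 4613732`.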
| 337 | 0 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) tokenizer for CodeGen models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=None, truncate_before_pattern=None, **kwargs) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
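# Usage sketch (my addition): cut a sampled completion at common stop patterns.
# The pattern list below is illustrative.
# tok = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
# text = tok.decode(generated_ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])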
| 120 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(self, num_latents=256, d_latents=1280, d_model=768, num_blocks=1, num_self_attends_per_block=26, num_self_attention_heads=8, num_cross_attention_heads=8, qk_channels=None, v_channels=None, cross_attention_shape_for_attention="kv", self_attention_widening_factor=1, cross_attention_widening_factor=1, hidden_act="gelu", attention_probs_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1e-12, use_query_residual=True, vocab_size=262, max_position_embeddings=2048, image_size=56, train_size=[368, 496], num_frames=16, audio_samples_per_frame=1920, samples_per_patch=16, output_shape=[1, 16, 224, 224], **kwargs):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(self, preprocessor, batch_size=-1, seq_length=-1, num_choices=-1, is_pair=False, framework=None, num_channels=3, image_width=40, image_height=40) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
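# Usage sketch (my addition): build a small config and inspect a default.
# config = PerceiverConfig(num_latents=64, d_latents=256)
# print(config.num_self_attends_per_block)  # 26 by default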
| 337 | 0 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",
        )

        # image noising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        # regular denoising components
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9 | 135 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generates a random password of the given length."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Builds a password that always contains the characters in `chars_incl`."""
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    """Checks for minimum length plus uppercase, lowercase, digit and special characters."""
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
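# Quick checks (my addition):
# is_strong_password("Hwea7$2!")  -> True
# is_strong_password("sh0rt")     -> False (too short)
# is_strong_password("Hello123")  -> False (no special character)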
def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
main()
| 337 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 264 |
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Returns True if no already-placed queen attacks square (row, column)."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Places queens row by row, printing every complete board found."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
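# Note (my addition): for n = 8 the search prints each board and reports the
# well-known total of 92 distinct solutions.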
| 337 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", projection_dim: int = 0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
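# Usage sketch (my addition): a context-encoder config with a projection head.
# config = DPRConfig(projection_dim=128)
# assert config.hidden_size == 768  # BERT-base defaults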
| 14 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 337 | 0 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    """Disables gradient computation for all parameters of `module`."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Picks cuda, then mps, then cpu — warning about known MPS issues."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img):
    """Displays a PIL image without axes."""
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
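# Illustrative use (my addition): freeze a layer before fine-tuning.
# layer = torch.nn.Linear(4, 4)
# freeze_module(layer)
# assert not any(p.requires_grad for p in layer.parameters())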
| 263 |
from collections.abc import Callable
class Heap:
    """A generic heap keyed by `key` (max-heap on the key value by default)."""

    def __init__(self, key=None):
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i):
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i):
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i):
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i, j):
        """Performs the changes required for swapping two elements in the heap."""
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i, j):
        """Compares the two items using default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i):
        """Returns the index among `i` and its children that should be the parent."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index):
        """Fixes the heap in the upward direction of the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index):
        """Fixes the heap in the downward direction of the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value):
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item):
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        # Move the last item into the vacated slot and shrink the heap.
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value):
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        """Returns the top item pair from the heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self):
        """Returns the top item pair from the heap and removes it as well, if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
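# Usage sketch (my addition): the heap orders items by key value, largest first.
# h = Heap()
# h.insert_item("a", 35); h.insert_item("b", 34); h.insert_item("c", 20)
# h.get_top()        # -> ["a", 35]
# h.update_item("a", 10)
# h.extract_top()    # -> ["b", 34]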
def test_heap() -> None:
    """Placeholder for doctests exercising the Heap class above."""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 226 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines; `frames` holds the generated video frames."""

    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 337 | 0 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    """Seeds `random`, `numpy`, and `torch` for reproducible behavior."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of model weights.
    """

    def __init__(self, parameters: Iterable[torch.nn.Parameter], decay: float = 0.9999, min_decay: float = 0.0, update_after_step: int = 0, use_ema_warmup: bool = False, inv_gamma: Union[float, int] = 1.0, power: Union[float, int] = 2 / 3, model_cls: Optional[Any] = None, model_config: Dict[str, Any] = None, **kwargs):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)
        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")
        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)
    def get_decay(self, optimization_step: int) -> float:
        """Computes the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)
    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copies the current averaged parameters into the given collection of parameters."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """Moves the internal buffers of the ExponentialMovingAverage to `device`."""
        # .to() on the tensors handles None correctly
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]
    def state_dict(self) -> dict:
        """Returns the state of the ExponentialMovingAverage as a dict."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }
    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Saves the current parameters for restoring later."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Restores the parameters stored with the `store` method."""
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)
        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict(self, state_dict: dict) -> None:
        """Loads the ExponentialMovingAverage state, validating each field."""
        # deepcopy, to be consistent with module API
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
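# Usage sketch (my addition): keep an EMA copy of a model's weights during training.
# ema = EMAModel(model.parameters())
# for batch in data:
#     ...  # forward/backward/optimizer step
#     ema.step(model.parameters())
# ema.copy_to(model.parameters())  # swap in the averaged weights for evaluation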
| 223 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation) -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
return input_ids
| 337 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
UpperCAmelCase = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 141 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def __lowercase ( ) ->int:
"""simple docstring"""
lowercase : Tuple = HfArgumentParser(_UpperCamelCase )
lowercase : List[str] = parser.parse_args_into_dataclasses()[0]
lowercase : Optional[int] = TensorFlowBenchmark(args=_UpperCamelCase )
try:
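        # Parse a second time only to surface deprecated --no_* flags with a helpful error message.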
lowercase : Any = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowercase : Optional[int] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
lowercase : Any = ''' '''.join(str(_UpperCamelCase ).split(''' ''' )[:-1] )
lowercase : Any = ''''''
lowercase : str = eval(str(_UpperCamelCase ).split(''' ''' )[-1] )
lowercase : List[str] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
lowercase : Union[str, Any] = full_error_msg + begin_error_msg + str(_UpperCamelCase )
raise ValueError(_UpperCamelCase )
benchmark.run()
if __name__ == "__main__":
main()
| 337 | 0 |
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def lowercase ( __magic_name__ = 8 ):
'''simple docstring'''
UpperCAmelCase : List[str] = ascii_letters + digits + punctuation
return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) )
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
i -= len(_UpperCamelCase )
UpperCAmelCase : Dict = i // 3
UpperCAmelCase : List[str] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
UpperCAmelCase : Union[str, Any] = (
chars_incl
+ random(_UpperCamelCase , quotient + remainder )
+ random(_UpperCamelCase , _UpperCamelCase )
+ random(_UpperCamelCase , _UpperCamelCase )
)
UpperCAmelCase : Union[str, Any] = list(_UpperCamelCase )
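    # Shuffle so the required characters are not clustered at the start of the password.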
shuffle(_UpperCamelCase )
return "".join(_UpperCamelCase )
# random is a generalised function for letters, characters and numbers
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) )
def lowercase ( __magic_name__ , __magic_name__ ):
    '''simple docstring'''
    # Minimal sketch -- the original left this as an exercise ("Put your code here...").
    # Like the generic ``random`` helper above, it draws characters from the given pool
    # with a cryptographically secure choice.
    return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) )
def lowercase ( __magic_name__ , __magic_name__ ):
    '''simple docstring'''
    # Minimal sketch, as above.
    return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) )
def lowercase ( __magic_name__ , __magic_name__ ):
    '''simple docstring'''
    # Minimal sketch, as above.
    return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) )
def lowercase ( __magic_name__ , __magic_name__ = 8 ):
'''simple docstring'''
if len(_UpperCamelCase ) < min_length:
# Your Password must be at least 8 characters long
return False
UpperCAmelCase : str = any(char in ascii_uppercase for char in password )
UpperCAmelCase : List[str] = any(char in ascii_lowercase for char in password )
UpperCAmelCase : Dict = any(char in digits for char in password )
UpperCAmelCase : Tuple = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = int(input("Please indicate the max length of your password: " ).strip() )
UpperCAmelCase : Optional[Any] = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(_UpperCamelCase ) )
print(
"Alternative Password generated:" , alternative_password_generator(_UpperCamelCase , _UpperCamelCase ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
| 311 |
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = [False] * len(_UpperCamelCase )
lowercase : Optional[int] = []
queue.append(_UpperCamelCase )
lowercase : Union[str, Any] = True
while queue:
lowercase : List[str] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_UpperCamelCase )
lowercase : Tuple = True
lowercase : Optional[Any] = u
return visited[t]
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->List[str]:
"""simple docstring"""
lowercase : List[str] = [-1] * (len(_UpperCamelCase ))
lowercase : int = 0
while bfs(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ):
lowercase : List[str] = float('''Inf''' )
lowercase : int = sink
while s != source:
            # Find the minimum residual capacity along the augmenting path
lowercase : List[Any] = min(_UpperCamelCase, graph[parent[s]][s] )
lowercase : Union[str, Any] = parent[s]
max_flow += path_flow
lowercase : Optional[int] = sink
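        # Walk back from the sink to the source, updating residual capacities along the path.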
while v != source:
lowercase : Any = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowercase : Union[str, Any] = parent[v]
return max_flow
__a = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
__a , __a = 0, 5
print(ford_fulkerson(graph, source, sink))
| 337 | 0 |
"""simple docstring"""
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 171 |
from typing import List
from .keymap import KEYMAP, get_character
def __lowercase ( _UpperCamelCase ) ->int:
"""simple docstring"""
def decorator(_UpperCamelCase ):
lowercase : str = getattr(_UpperCamelCase, '''handle_key''', [] )
handle += [key]
setattr(_UpperCamelCase, '''handle_key''', _UpperCamelCase )
return func
return decorator
def __lowercase ( *_UpperCamelCase ) ->Any:
"""simple docstring"""
def decorator(_UpperCamelCase ):
lowercase : List[Any] = getattr(_UpperCamelCase, '''handle_key''', [] )
handle += keys
setattr(_UpperCamelCase, '''handle_key''', _UpperCamelCase )
return func
return decorator
class __SCREAMING_SNAKE_CASE ( A__ ):
def __new__( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : str = super().__new__(cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not hasattr(SCREAMING_SNAKE_CASE__ , '''key_handler''' ):
setattr(SCREAMING_SNAKE_CASE__ , '''key_handler''' , {} )
setattr(SCREAMING_SNAKE_CASE__ , '''handle_input''' , KeyHandler.handle_input )
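        # Gather every method tagged by the decorators above into the class-level key_handler map.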
for value in attrs.values():
lowercase : Dict = getattr(SCREAMING_SNAKE_CASE__ , '''handle_key''' , [] )
for key in handled_keys:
lowercase : List[Any] = value
return new_cls
@staticmethod
def __lowerCamelCase ( cls ):
lowercase : Dict = get_character()
if char != KEYMAP["undefined"]:
lowercase : Optional[int] = ord(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = cls.key_handler.get(SCREAMING_SNAKE_CASE__ )
if handler:
lowercase : Tuple = char
return handler(cls )
else:
return None
def __lowercase ( cls ) ->Any:
"""simple docstring"""
return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
| 337 | 0 |
"""simple docstring"""
def __A ( a_ :Optional[int] , a_ :int) -> int:
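    # Set the bit at the given position by OR-ing with a one-hot mask.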
return number | (1 << position)
def __A ( a_ :Union[str, Any] , a_ :List[str]) -> int:
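    # Clear the bit at the given position by AND-ing with the inverted mask.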
return number & ~(1 << position)
def __A ( a_ :Optional[int] , a_ :Any) -> int:
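    # Toggle the bit at the given position with XOR.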
return number ^ (1 << position)
def __A ( a_ :int , a_ :Tuple) -> bool:
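    # Return True iff the bit at the given position is set.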
return ((number >> position) & 1) == 1
def __A ( a_ :Any , a_ :Optional[int]) -> int:
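    # Return the bit at the given position as 0 or 1.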
return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 160 |
import logging
import os
from .state import PartialState
class __SCREAMING_SNAKE_CASE ( logging.LoggerAdapter ):
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ ):
lowercase : List[Any] = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
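        # Wraps logging.Logger.log so records can be limited to the main process or emitted rank by rank.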
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
lowercase : List[str] = kwargs.pop('''main_process_only''' , SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = kwargs.pop('''in_order''' , SCREAMING_SNAKE_CASE__ )
if self.isEnabledFor(SCREAMING_SNAKE_CASE__ ):
if self._should_log(SCREAMING_SNAKE_CASE__ ):
lowercase , lowercase : str = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
elif in_order:
lowercase : List[Any] = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
lowercase , lowercase : Union[str, Any] = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
state.wait_for_everyone()
def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->List[Any]:
"""simple docstring"""
if log_level is None:
lowercase : str = os.environ.get('''ACCELERATE_LOG_LEVEL''', _UpperCamelCase )
lowercase : str = logging.getLogger(_UpperCamelCase )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(_UpperCamelCase, {} )
| 337 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Optional[Any] = logging.get_logger(__name__)
__A : int = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __snake_case ( A__):
"""simple docstring"""
lowercase = 'cvt'
def __init__( self : List[str] , lowerCamelCase : Tuple=3 , lowerCamelCase : Tuple=[7, 3, 3] , lowerCamelCase : List[str]=[4, 2, 2] , lowerCamelCase : Dict=[2, 1, 1] , lowerCamelCase : Optional[int]=[64, 1_92, 3_84] , lowerCamelCase : Optional[Any]=[1, 3, 6] , lowerCamelCase : List[Any]=[1, 2, 10] , lowerCamelCase : List[str]=[4.0, 4.0, 4.0] , lowerCamelCase : Optional[Any]=[0.0, 0.0, 0.0] , lowerCamelCase : Union[str, Any]=[0.0, 0.0, 0.0] , lowerCamelCase : int=[0.0, 0.0, 0.1] , lowerCamelCase : Tuple=[True, True, True] , lowerCamelCase : Dict=[False, False, True] , lowerCamelCase : int=["dw_bn", "dw_bn", "dw_bn"] , lowerCamelCase : Dict=[3, 3, 3] , lowerCamelCase : List[Any]=[1, 1, 1] , lowerCamelCase : List[str]=[2, 2, 2] , lowerCamelCase : Optional[Any]=[1, 1, 1] , lowerCamelCase : Optional[int]=[1, 1, 1] , lowerCamelCase : Dict=0.02 , lowerCamelCase : Tuple=1E-12 , **lowerCamelCase : Dict , ) -> int:
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : Optional[Any] = num_channels
lowerCAmelCase_ : Union[str, Any] = patch_sizes
lowerCAmelCase_ : List[Any] = patch_stride
lowerCAmelCase_ : Dict = patch_padding
lowerCAmelCase_ : Tuple = embed_dim
lowerCAmelCase_ : List[str] = num_heads
lowerCAmelCase_ : Dict = depth
lowerCAmelCase_ : Optional[Any] = mlp_ratio
lowerCAmelCase_ : str = attention_drop_rate
lowerCAmelCase_ : List[str] = drop_rate
lowerCAmelCase_ : Optional[Any] = drop_path_rate
lowerCAmelCase_ : str = qkv_bias
lowerCAmelCase_ : int = cls_token
lowerCAmelCase_ : Optional[int] = qkv_projection_method
lowerCAmelCase_ : Optional[Any] = kernel_qkv
lowerCAmelCase_ : Tuple = padding_kv
lowerCAmelCase_ : Optional[Any] = stride_kv
lowerCAmelCase_ : Dict = padding_q
lowerCAmelCase_ : List[Any] = stride_q
lowerCAmelCase_ : str = initializer_range
lowerCAmelCase_ : str = layer_norm_eps
| 120 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class __SCREAMING_SNAKE_CASE ( pl.LightningModule ):
def __init__( self , SCREAMING_SNAKE_CASE__ ):
super().__init__()
lowercase : Any = model
lowercase : Optional[Any] = 2
lowercase : Optional[int] = nn.Linear(self.model.config.hidden_size , self.num_labels )
def __lowerCamelCase ( self ):
pass
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
lowercase : str = LongformerModel.from_pretrained(_UpperCamelCase )
lowercase : int = LightningModel(_UpperCamelCase )
lowercase : Union[str, Any] = torch.load(_UpperCamelCase, map_location=torch.device('''cpu''' ) )
lightning_model.load_state_dict(ckpt['''state_dict'''] )
# init longformer question answering model
lowercase : List[Any] = LongformerForQuestionAnswering.from_pretrained(_UpperCamelCase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_UpperCamelCase )
print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__a = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 337 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all BART models at https://huggingface.co/models?filter=bart
__A = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
__A = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
@lru_cache()
def lowercase_ ( ) -> Union[str, Any]:
'''simple docstring'''
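    # Build the reversible byte <-> unicode-character table used by byte-level BPE.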
__lowerCamelCase : List[Any] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
__lowerCamelCase : Any = bs[:]
__lowerCamelCase : Union[str, Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_UpperCamelCase )
cs.append(2**8 + n )
n += 1
__lowerCamelCase : int = [chr(_UpperCamelCase ) for n in cs]
return dict(zip(_UpperCamelCase , _UpperCamelCase ) )
def lowercase_ ( _lowerCamelCase: Tuple ) -> Optional[int]:
'''simple docstring'''
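    # Return the set of adjacent symbol pairs occurring in the word.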
__lowerCamelCase : Union[str, Any] = set()
__lowerCamelCase : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowerCamelCase : Tuple = char
return pairs
class _snake_case ( A__ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ['input_ids', 'attention_mask']
def __init__( self : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict="replace" , UpperCAmelCase : str="<s>" , UpperCAmelCase : Optional[Any]="</s>" , UpperCAmelCase : List[str]="</s>" , UpperCAmelCase : str="<s>" , UpperCAmelCase : int="<unk>" , UpperCAmelCase : Any="<pad>" , UpperCAmelCase : Optional[Any]="<mask>" , UpperCAmelCase : List[str]=False , **UpperCAmelCase : Optional[int] , ):
__lowerCamelCase : Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else bos_token
__lowerCamelCase : Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else eos_token
__lowerCamelCase : Any = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else sep_token
__lowerCamelCase : str = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cls_token
__lowerCamelCase : List[str] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else unk_token
__lowerCamelCase : List[Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__lowerCamelCase : int = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
with open(SCREAMING_SNAKE_CASE__ , encoding="utf-8" ) as vocab_handle:
__lowerCamelCase : str = json.load(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Dict = {v: k for k, v in self.encoder.items()}
__lowerCamelCase : Any = errors # how to handle errors in decoding
__lowerCamelCase : Union[str, Any] = bytes_to_unicode()
__lowerCamelCase : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE__ , encoding="utf-8" ) as merges_handle:
__lowerCamelCase : str = merges_handle.read().split("\n" )[1:-1]
__lowerCamelCase : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
__lowerCamelCase : int = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
__lowerCamelCase : Optional[Any] = {}
__lowerCamelCase : Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowerCamelCase : Tuple = re.compile(r"\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def lowerCamelCase__ ( self : List[Any] ):
return len(self.encoder )
def lowerCamelCase__ ( self : str ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ):
if token in self.cache:
return self.cache[token]
__lowerCamelCase : int = tuple(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Any = get_pairs(SCREAMING_SNAKE_CASE__ )
if not pairs:
return token
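        # Repeatedly merge the lowest-ranked known pair until no mergeable pair remains.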
while True:
__lowerCamelCase : List[str] = min(SCREAMING_SNAKE_CASE__ , key=lambda UpperCAmelCase : self.bpe_ranks.get(SCREAMING_SNAKE_CASE__ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__lowerCamelCase : Tuple = bigram
__lowerCamelCase : int = []
__lowerCamelCase : int = 0
while i < len(SCREAMING_SNAKE_CASE__ ):
try:
__lowerCamelCase : Union[str, Any] = word.index(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowerCamelCase : Union[str, Any] = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowerCamelCase : int = tuple(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Optional[int] = new_word
if len(SCREAMING_SNAKE_CASE__ ) == 1:
break
else:
__lowerCamelCase : List[str] = get_pairs(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Dict = ''' '''.join(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : str = word
return word
def lowerCamelCase__ ( self : Any , UpperCAmelCase : Optional[int] ):
__lowerCamelCase : List[str] = []
for token in re.findall(self.pat , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : Union[str, Any] = ''''''.join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE__ ).split(" " ) )
return bpe_tokens
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : Tuple ):
return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder.get(self.unk_token ) )
def lowerCamelCase__ ( self : str , UpperCAmelCase : Any ):
return self.decoder.get(SCREAMING_SNAKE_CASE__ )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCAmelCase : Any ):
__lowerCamelCase : Dict = ''''''.join(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Any = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def lowerCamelCase__ ( self : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : int = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCamelCase : Dict = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__lowerCamelCase : Optional[int] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(SCREAMING_SNAKE_CASE__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ ) + "\n" )
__lowerCamelCase : Tuple = 0
with open(SCREAMING_SNAKE_CASE__ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
        for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
__lowerCamelCase : List[str] = token_index
writer.write(" ".join(SCREAMING_SNAKE_CASE__ ) + "\n" )
index += 1
return vocab_file, merge_file
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCamelCase : str = [self.cls_token_id]
__lowerCamelCase : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__ ( self : Any , UpperCAmelCase : int , UpperCAmelCase : str = None , UpperCAmelCase : Optional[int] = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : List[str] = None ):
__lowerCamelCase : int = [self.sep_token_id]
__lowerCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any]=False , **UpperCAmelCase : Union[str, Any] ):
__lowerCamelCase : Tuple = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE__ ) > 0 and not text[0].isspace()):
__lowerCamelCase : Union[str, Any] = ''' ''' + text
        return (text, kwargs)
| 135 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Any = 'yolos'
def __init__( self , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=[512, 864] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=100 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = hidden_size
lowercase : int = num_hidden_layers
lowercase : str = num_attention_heads
lowercase : str = intermediate_size
lowercase : Dict = hidden_act
lowercase : int = hidden_dropout_prob
lowercase : Optional[Any] = attention_probs_dropout_prob
lowercase : List[Any] = initializer_range
lowercase : Optional[int] = layer_norm_eps
lowercase : str = image_size
lowercase : Dict = patch_size
lowercase : str = num_channels
lowercase : Optional[int] = qkv_bias
lowercase : List[str] = num_detection_tokens
lowercase : List[str] = use_mid_position_embeddings
lowercase : Dict = auxiliary_loss
# Hungarian matcher
lowercase : Optional[Any] = class_cost
lowercase : Any = bbox_cost
lowercase : int = giou_cost
# Loss coefficients
lowercase : Dict = bbox_loss_coefficient
lowercase : Optional[Any] = giou_loss_coefficient
lowercase : Tuple = eos_coefficient
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = version.parse('1.11' )
@property
def __lowerCamelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCamelCase ( self ):
return 1E-4
@property
def __lowerCamelCase ( self ):
return 12
| 337 | 0 |
"""simple docstring"""
import os
def __lowercase ( ):
snake_case_ : int = os.path.join(os.path.dirname(_UpperCamelCase ) , '''num.txt''' )
with open(_UpperCamelCase ) as file_hand:
return str(sum(int(_UpperCamelCase ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 264 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__a = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Optional[int]:
"""simple docstring"""
if got_ver is None or want_ver is None:
raise ValueError(
f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
f""" reinstalling {pkg}.""" )
if not ops[op](version.parse(_UpperCamelCase ), version.parse(_UpperCamelCase ) ):
raise ImportError(
f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->None:
"""simple docstring"""
lowercase : List[Any] = f"""\n{hint}""" if hint is not None else ''''''
# non-versioned check
if re.match(R'''^[\w_\-\d]+$''', _UpperCamelCase ):
lowercase , lowercase , lowercase : Optional[Any] = requirement, None, None
else:
lowercase : List[Any] = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''', _UpperCamelCase )
if not match:
raise ValueError(
            '''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but'''
f""" got {requirement}""" )
lowercase , lowercase : str = match[0]
lowercase : Tuple = want_full.split(''',''' ) # there could be multiple requirements
lowercase : List[Any] = {}
for w in want_range:
lowercase : str = re.findall(R'''^([\s!=<>]{1,2})(.+)''', _UpperCamelCase )
if not match:
raise ValueError(
                '''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,'''
f""" but got {requirement}""" )
lowercase , lowercase : Optional[int] = match[0]
lowercase : Dict = want_ver
if op not in ops:
raise ValueError(f"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
# special case
if pkg == "python":
lowercase : int = '''.'''.join([str(_UpperCamelCase ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
return
# check if any version is installed
try:
lowercase : List[str] = importlib.metadata.version(_UpperCamelCase )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
def __lowercase ( _UpperCamelCase ) ->int:
"""simple docstring"""
lowercase : Optional[int] = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
return require_version(_UpperCamelCase, _UpperCamelCase )
| 337 | 0 |
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> list[float]:
"""simple docstring"""
A__ = coefficient_matrix.shape
A__ = constant_matrix.shape
if rowsa != colsa:
A__ = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
raise ValueError(_UpperCamelCase )
if colsa != 1:
A__ = f"""Constant matrix must be nx1 but received {rowsa}x{colsa}"""
raise ValueError(_UpperCamelCase )
if rowsa != rowsa:
A__ = (
'''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
f"""received {rowsa}x{colsa} and {rowsa}x{colsa}"""
)
raise ValueError(_UpperCamelCase )
if len(_UpperCamelCase ) != rowsa:
A__ = (
'''Number of initial values must be equal to number of rows in coefficient '''
f"""matrix but received {len(_UpperCamelCase )} and {rowsa}"""
)
raise ValueError(_UpperCamelCase )
if iterations <= 0:
raise ValueError('''Iterations must be at least 1''' )
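    # Build the augmented matrix [A | b] so each row carries its constant term.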
A__ = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
A__ = table.shape
strictly_diagonally_dominant(_UpperCamelCase )
# Iterates the whole matrix for given number of times
for _ in range(_UpperCamelCase ):
A__ = []
for row in range(_UpperCamelCase ):
A__ = 0
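            # Split the row into its diagonal coefficient (denominator), constant term, and off-diagonal sum.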
for col in range(_UpperCamelCase ):
if col == row:
A__ = table[row][col]
elif col == cols - 1:
A__ = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
A__ = (temp + val) / denom
new_val.append(_UpperCamelCase )
A__ = new_val
return [float(_UpperCamelCase ) for i in new_val]
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> bool:
"""simple docstring"""
A__ = table.shape
A__ = True
for i in range(0 , _UpperCamelCase ):
A__ = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__a = logging.get_logger(__name__)
__a = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = 'deta'
A : Dict = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=900 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=1024 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="relu" , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__="sine" , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=300 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.25 , **SCREAMING_SNAKE_CASE__ , ):
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowercase : Tuple = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
else:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = backbone_config.pop('''model_type''' )
lowercase : Any = CONFIG_MAPPING[backbone_model_type]
lowercase : List[Any] = config_class.from_dict(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = backbone_config
lowercase : Union[str, Any] = num_queries
lowercase : Any = max_position_embeddings
lowercase : int = d_model
lowercase : Any = encoder_ffn_dim
lowercase : Optional[int] = encoder_layers
lowercase : Tuple = encoder_attention_heads
lowercase : Optional[Any] = decoder_ffn_dim
lowercase : Optional[int] = decoder_layers
lowercase : int = decoder_attention_heads
lowercase : Any = dropout
lowercase : int = attention_dropout
lowercase : Dict = activation_dropout
lowercase : int = activation_function
lowercase : Dict = init_std
lowercase : List[str] = init_xavier_std
lowercase : Optional[Any] = encoder_layerdrop
lowercase : Tuple = auxiliary_loss
lowercase : Tuple = position_embedding_type
# deformable attributes
lowercase : List[str] = num_feature_levels
lowercase : Tuple = encoder_n_points
lowercase : Optional[int] = decoder_n_points
lowercase : Tuple = two_stage
lowercase : Optional[Any] = two_stage_num_proposals
lowercase : Union[str, Any] = with_box_refine
lowercase : Any = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
lowercase : Optional[Any] = class_cost
lowercase : str = bbox_cost
lowercase : List[Any] = giou_cost
# Loss coefficients
lowercase : Tuple = mask_loss_coefficient
lowercase : Any = dice_loss_coefficient
lowercase : Dict = bbox_loss_coefficient
lowercase : Tuple = giou_loss_coefficient
lowercase : Union[str, Any] = eos_coefficient
lowercase : Tuple = focal_alpha
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def __lowerCamelCase ( self ):
return self.encoder_attention_heads
@property
def __lowerCamelCase ( self ):
return self.d_model
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase : Any = self.backbone_config.to_dict()
lowercase : List[str] = self.__class__.model_type
return output
| 337 | 0 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowerCamelCase_ (UpperCamelCase__ : Any ):
def is_in_circle(UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] ) -> bool:
_UpperCAmelCase : List[str] = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
_UpperCAmelCase : List[str] = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(_UpperCamelCase ) )
# The ratio of the area for circle to square is pi/4.
_UpperCAmelCase : str = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
print(F'The numpy value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
def lowerCamelCase_ (UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple = 0.0 , UpperCamelCase__ : Tuple = 1.0 , ):
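    # Monte Carlo estimate: mean of f(U) for U ~ Uniform(min_value, max_value), scaled by the interval length.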
return mean(
function_to_integrate(uniform(_UpperCamelCase , _UpperCamelCase ) ) for _ in range(_UpperCamelCase ) ) * (max_value - min_value)
def lowerCamelCase_ (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict = 0.0 , UpperCamelCase__ : str = 1.0 ):
def identity_function(UpperCamelCase__ : List[str] ) -> float:
return x
_UpperCAmelCase : Union[str, Any] = area_under_curve_estimator(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase : Tuple = (max_value * max_value - min_value * min_value) / 2
print('''******************''' )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print('''******************''' )
def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] ):
def function_to_integrate(UpperCamelCase__ : Optional[int] ) -> float:
return sqrt(4.0 - x * x )
_UpperCAmelCase : int = area_under_curve_estimator(
_UpperCamelCase , _UpperCamelCase , 0.0 , 2.0 )
print('''******************''' )
print('''Estimating pi using area_under_curve_estimator''' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print('''******************''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263 |
def __lowercase ( ) ->List[Any]:
"""simple docstring"""
lowercase : Union[str, Any] = 0
for i in range(1, 1001 ):
total += i**i
return str(_UpperCamelCase )[-10:]
if __name__ == "__main__":
print(solution())
| 337 | 0 |
from __future__ import annotations
def a ( _UpperCAmelCase : Any ):
'''simple docstring'''
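    # Map each lowercase letter to its 1-based position in the alphabet (a=1 ... z=26).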
return [ord(_UpperCamelCase ) - 96 for elem in plain]
def a ( _UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
return "".join(chr(elem + 96 ) for elem in encoded )
def a ( ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = encode(input('''-> ''' ).strip().lower() )
print('''Encoded: ''' , _UpperCamelCase )
print('''Decoded:''' , decode(_UpperCamelCase ) )
if __name__ == "__main__":
main()
| 226 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__a = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self ):
lowercase : str = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
lowercase : Any = self.diffusers_dir
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE__ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def __lowerCamelCase ( self ):
lowercase : List[Any] = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
lowercase : Tuple = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowercase : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
lowercase : Any = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
lowercase : List[Any] = black.format_str(SCREAMING_SNAKE_CASE__ , mode=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' , newline='''\n''' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(SCREAMING_SNAKE_CASE__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f:
self.assertTrue(f.read() , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Tuple = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , )
# Copy consistency with a really long name
lowercase : List[Any] = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , overwrite_result=re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , )
| 337 | 0 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class a_ ( A__ ):
__A = ['image_processor']
__A = 'SamImageProcessor'
def __init__( self : Tuple , lowercase : Any ):
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE__ )
lowercase_ :Union[str, Any] = self.image_processor
lowercase_ :List[Any] = -10
lowercase_ :Dict = self.image_processor.size['''longest_edge''']
def __call__( self : Optional[Any] , lowercase : Optional[int]=None , lowercase : Optional[int]=None , lowercase : str=None , lowercase : Optional[int]=None , lowercase : Optional[int] = None , **lowercase : List[str] , ):
"""simple docstring"""
lowercase_ :Optional[Any] = self.image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
# pop arguments that are not used in the foward but used nevertheless
lowercase_ :List[Any] = encoding_image_processor['''original_sizes''']
if hasattr(SCREAMING_SNAKE_CASE__ , "numpy" ): # Checks if Torch or TF tensor
lowercase_ :List[str] = original_sizes.numpy()
lowercase_ :List[Any] = self._check_and_preprocess_points(
input_points=SCREAMING_SNAKE_CASE__ , input_labels=SCREAMING_SNAKE_CASE__ , input_boxes=SCREAMING_SNAKE_CASE__ , )
lowercase_ :str = self._normalize_and_convert(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , input_points=SCREAMING_SNAKE_CASE__ , input_labels=SCREAMING_SNAKE_CASE__ , input_boxes=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , )
return encoding_image_processor
def lowercase__ ( self : Optional[Any] , lowercase : List[str] , lowercase : List[str] , lowercase : Optional[int]=None , lowercase : Any=None , lowercase : List[Any]=None , lowercase : str="pt" , ):
"""simple docstring"""
if input_points is not None:
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
lowercase_ :Optional[Any] = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE__ , original_sizes[0] ) for point in input_points
]
else:
lowercase_ :Optional[int] = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for point, original_size in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
lowercase_ :Any = self._pad_points_and_labels(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase_ :Any = np.array(SCREAMING_SNAKE_CASE__ )
if input_labels is not None:
lowercase_ :Union[str, Any] = np.array(SCREAMING_SNAKE_CASE__ )
if input_boxes is not None:
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
lowercase_ :int = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE__ , original_sizes[0] , is_bounding_box=SCREAMING_SNAKE_CASE__ )
for box in input_boxes
]
else:
lowercase_ :int = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , is_bounding_box=SCREAMING_SNAKE_CASE__ )
for box, original_size in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
]
lowercase_ :Any = np.array(SCREAMING_SNAKE_CASE__ )
if input_boxes is not None:
if return_tensors == "pt":
lowercase_ :int = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
# boxes batch size of 1 by default
lowercase_ :List[str] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
lowercase_ :Any = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
# boxes batch size of 1 by default
lowercase_ :Optional[int] = tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"input_boxes": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
lowercase_ :Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
# point batch size of 1 by default
lowercase_ :Union[str, Any] = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
lowercase_ :Any = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
# point batch size of 1 by default
lowercase_ :Dict = tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"input_points": input_points} )
if input_labels is not None:
if return_tensors == "pt":
lowercase_ :Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
# point batch size of 1 by default
lowercase_ :Dict = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
lowercase_ :str = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
# point batch size of 1 by default
lowercase_ :Tuple = tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"input_labels": input_labels} )
return encoding_image_processor
def lowercase__ ( self : Union[str, Any] , lowercase : List[str] , lowercase : List[Any] ):
"""simple docstring"""
lowercase_ :Optional[int] = max([point.shape[0] for point in input_points] )
lowercase_ :str = []
for i, point in enumerate(SCREAMING_SNAKE_CASE__ ):
if point.shape[0] != expected_nb_points:
lowercase_ :Optional[int] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
lowercase_ :Union[str, Any] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(SCREAMING_SNAKE_CASE__ )
lowercase_ :Dict = processed_input_points
return input_points, input_labels
def lowercase__ ( self : Tuple , lowercase : Union[str, Any] , lowercase : Any , lowercase : Optional[Any] , lowercase : Optional[Any]=False ):
"""simple docstring"""
lowercase_ :Union[str, Any] = original_size
lowercase_ :List[Any] = self.image_processor._get_preprocess_shape(SCREAMING_SNAKE_CASE__ , longest_edge=SCREAMING_SNAKE_CASE__ )
lowercase_ :Union[str, Any] = deepcopy(SCREAMING_SNAKE_CASE__ ).astype(SCREAMING_SNAKE_CASE__ )
if is_bounding_box:
lowercase_ :Any = coords.reshape(-1 , 2 , 2 )
lowercase_ :Any = coords[..., 0] * (new_w / old_w)
lowercase_ :List[Any] = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
lowercase_ :Union[str, Any] = coords.reshape(-1 , 4 )
return coords
def lowercase__ ( self : Tuple , lowercase : Dict=None , lowercase : Optional[int]=None , lowercase : str=None , ):
"""simple docstring"""
if input_points is not None:
if hasattr(SCREAMING_SNAKE_CASE__ , "numpy" ): # Checks for TF or Torch tensor
lowercase_ :Tuple = input_points.numpy().tolist()
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or not isinstance(input_points[0] , SCREAMING_SNAKE_CASE__ ):
raise ValueError("Input points must be a list of list of floating points." )
lowercase_ :Optional[Any] = [np.array(SCREAMING_SNAKE_CASE__ ) for input_point in input_points]
else:
lowercase_ :Dict = None
if input_labels is not None:
if hasattr(SCREAMING_SNAKE_CASE__ , "numpy" ):
lowercase_ :str = input_labels.numpy().tolist()
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or not isinstance(input_labels[0] , SCREAMING_SNAKE_CASE__ ):
raise ValueError("Input labels must be a list of list integers." )
lowercase_ :Tuple = [np.array(SCREAMING_SNAKE_CASE__ ) for label in input_labels]
else:
lowercase_ :Any = None
if input_boxes is not None:
if hasattr(SCREAMING_SNAKE_CASE__ , "numpy" ):
lowercase_ :Tuple = input_boxes.numpy().tolist()
if (
not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
or not isinstance(input_boxes[0] , SCREAMING_SNAKE_CASE__ )
or not isinstance(input_boxes[0][0] , SCREAMING_SNAKE_CASE__ )
):
raise ValueError("Input boxes must be a list of list of list of floating points." )
lowercase_ :List[str] = [np.array(SCREAMING_SNAKE_CASE__ ).astype(np.floataa ) for box in input_boxes]
else:
lowercase_ :List[Any] = None
return input_points, input_labels, input_boxes
@property
def lowercase__ ( self : Optional[Any] ):
"""simple docstring"""
lowercase_ :List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(SCREAMING_SNAKE_CASE__ ) )
def lowercase__ ( self : Dict , *lowercase : List[Any] , **lowercase : int ):
"""simple docstring"""
return self.image_processor.post_process_masks(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 223 |
import math
class __SCREAMING_SNAKE_CASE :
def __init__( self , SCREAMING_SNAKE_CASE__=0 ): # a graph with Node 0,1,...,N-1
lowercase : List[Any] = n
lowercase : List[Any] = [
[math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ )
] # adjacency matrix for weight
lowercase : Union[str, Any] = [
[math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ )
] # dp[i][j] stores minimum distance from i to j
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : int = w
def __lowerCamelCase ( self ):
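        # Classic O(n^3) Floyd-Warshall: try each node k in turn as an intermediate hop.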
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
lowercase : Any = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return self.dp[u][v]
if __name__ == "__main__":
__a = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 337 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class lowerCAmelCase :
def __init__( self : int ):
"""simple docstring"""
__lowercase ={}
def snake_case ( self : Union[str, Any] , __lowercase : str ):
"""simple docstring"""
__lowercase ={}
def snake_case ( self : Any , __lowercase : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Optional[int] ):
"""simple docstring"""
if nodea not in self.connections:
self.add_node(SCREAMING_SNAKE_CASE__ )
if nodea not in self.connections:
self.add_node(SCREAMING_SNAKE_CASE__ )
__lowercase =probability
def snake_case ( self : Any ):
"""simple docstring"""
return list(self.connections )
def snake_case ( self : List[Any] , __lowercase : str ):
"""simple docstring"""
__lowercase =0
__lowercase =random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def __UpperCamelCase ( lowercase__ : Any, lowercase__ : Any, lowercase__ : int ):
'''simple docstring'''
__lowercase =MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
__lowercase =Counter(graph.get_nodes() )
__lowercase =start
for _ in range(_UpperCamelCase ):
__lowercase =graph.transition(_UpperCamelCase )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 141 |
from __future__ import annotations
def __lowercase ( _UpperCamelCase ) ->float:
"""simple docstring"""
if not nums:
raise ValueError('''List is empty''' )
return sum(_UpperCamelCase ) / len(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 0 |
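# The Markov-chain row above drives a random walk over a weighted transition
# graph. A compact runnable sketch of the same sampling idea (a hypothetical
# helper, not the dataset code): accumulate transition probabilities until they
# exceed a uniform draw, then move to that destination.
from collections import Counter
from random import random

def random_walk_sketch(transitions, start, steps):
    visits = Counter()
    node = start
    for _ in range(steps):
        threshold, acc = random(), 0.0
        for dest, prob in transitions[node].items():
            acc += prob
            if acc > threshold:
                node = dest
                break
        visits[node] += 1
    return visits

# Example: a two-state chain that strongly prefers staying in state "a".
print(random_walk_sketch({"a": {"a": 0.9, "b": 0.1}, "b": {"a": 0.5, "b": 0.5}}, "a", 1000))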
'''simple docstring'''
import logging
import os
from .state import PartialState
class UpperCamelCase__ ( logging.LoggerAdapter ):
"""simple docstring"""
@staticmethod
def A_ ( snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def A_ ( self , snake_case , snake_case , *snake_case , **snake_case ):
'''simple docstring'''
if PartialState._shared_state == {}:
raise RuntimeError(
"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
UpperCAmelCase : List[str] = kwargs.pop("main_process_only" , SCREAMING_SNAKE_CASE__ )
UpperCAmelCase : List[str] = kwargs.pop("in_order" , SCREAMING_SNAKE_CASE__ )
if self.isEnabledFor(SCREAMING_SNAKE_CASE__ ):
if self._should_log(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase : str = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
elif in_order:
UpperCAmelCase : List[Any] = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
UpperCAmelCase : Union[str, Any] = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
state.wait_for_everyone()
def lowercase ( __magic_name__ , __magic_name__ = None ):
'''simple docstring'''
if log_level is None:
UpperCAmelCase : str = os.environ.get("ACCELERATE_LOG_LEVEL" , _UpperCamelCase )
UpperCAmelCase : str = logging.getLogger(_UpperCamelCase )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(_UpperCamelCase , {} )
| 311 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__a = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
warnings.warn(
'''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DeiTImageProcessor instead.''' , SCREAMING_SNAKE_CASE__ , )
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 337 | 0 |
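# The first snippet in the row above wraps logging.LoggerAdapter so that, by
# default, only the main process emits records. Below is a standalone sketch of
# that gating pattern; reading the rank from a RANK environment variable is an
# assumption for illustration, not the accelerate API.
import logging
import os

class MainProcessOnlyAdapter(logging.LoggerAdapter):
    def log(self, level, msg, *args, **kwargs):
        main_process_only = kwargs.pop("main_process_only", True)
        is_main = int(os.environ.get("RANK", "0")) == 0
        # Drop the record unless we are the main process or gating is disabled.
        if self.isEnabledFor(level) and (is_main or not main_process_only):
            msg, kwargs = self.process(msg, kwargs)
            self.logger.log(level, msg, *args, **kwargs)

logging.basicConfig(level=logging.INFO)
logger = MainProcessOnlyAdapter(logging.getLogger(__name__), {})
logger.info("visible only on the main process")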
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 171 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
__a = logging.get_logger(__name__)
def __lowercase ( _UpperCamelCase ) ->List[int]:
"""simple docstring"""
if isinstance(_UpperCamelCase, np.ndarray ):
return list(tensor.shape )
lowercase : Optional[Any] = tf.shape(_UpperCamelCase )
if tensor.shape == tf.TensorShape(_UpperCamelCase ):
return dynamic
lowercase : Tuple = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(_UpperCamelCase )]
def __lowercase ( _UpperCamelCase, _UpperCamelCase = None, _UpperCamelCase = None ) ->tf.Tensor:
"""simple docstring"""
return tf.nn.softmax(logits=logits + 1e-9, axis=_UpperCamelCase, name=_UpperCamelCase )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=1e-5, _UpperCamelCase=-1 ) ->int:
"""simple docstring"""
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_UpperCamelCase, _UpperCamelCase ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
lowercase , lowercase : Union[str, Any] = tf.nn.moments(_UpperCamelCase, axes=[axis], keepdims=_UpperCamelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
lowercase : int = [1] * inputs.shape.rank
lowercase : Union[str, Any] = shape_list(_UpperCamelCase )[axis]
lowercase : List[str] = tf.reshape(_UpperCamelCase, _UpperCamelCase )
lowercase : Dict = tf.reshape(_UpperCamelCase, _UpperCamelCase )
# Compute layer normalization using the batch_normalization
# function.
lowercase : List[str] = tf.nn.batch_normalization(
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, offset=_UpperCamelCase, scale=_UpperCamelCase, variance_epsilon=_UpperCamelCase, )
return outputs
def __lowercase ( _UpperCamelCase, _UpperCamelCase=0, _UpperCamelCase=-1 ) ->List[Any]:
"""simple docstring"""
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
lowercase : Dict = tf.shape(_UpperCamelCase )
lowercase : Optional[Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
lowercase : List[str] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0 )
return tf.reshape(_UpperCamelCase, _UpperCamelCase )
def __lowercase ( _UpperCamelCase ) ->tf.Tensor:
"""simple docstring"""
if not isinstance(_UpperCamelCase, tf.Tensor ):
lowercase : Optional[Any] = tf.convert_to_tensor(_UpperCamelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
lowercase : Tuple = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
lowercase : List[Any] = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
lowercase : str = (
tf.cast(1, encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = "input_ids" ) ->None:
"""simple docstring"""
tf.debugging.assert_less(
_UpperCamelCase, tf.cast(_UpperCamelCase, dtype=tensor.dtype ), message=(
f"""The maximum value of {tensor_name} ({tf.math.reduce_max(_UpperCamelCase )}) must be smaller than the embedding """
f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
), )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
lowercase : List[Any] = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
lowercase : Optional[int] = [x for x in data if len(_UpperCamelCase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
f"""bytes: {bad_attributes}""" )
lowercase : Any = np.asarray(_UpperCamelCase )
lowercase : List[Any] = 1
lowercase : Tuple = np.array_split(_UpperCamelCase, _UpperCamelCase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
lowercase : Dict = np.array_split(_UpperCamelCase, _UpperCamelCase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(_UpperCamelCase ):
lowercase : Optional[int] = chunk_data
else:
lowercase : int = data
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->List[str]:
"""simple docstring"""
if name in group.attrs:
lowercase : str = [n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs[name]]
else:
lowercase : Optional[Any] = []
lowercase : List[str] = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def __lowercase ( _UpperCamelCase ) ->List[str]:
"""simple docstring"""
def _expand_single_ad_tensor(_UpperCamelCase ):
if isinstance(_UpperCamelCase, tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(_UpperCamelCase, axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor, _UpperCamelCase )
| 337 | 0 |
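# The TF utilities above include an attribute saver that splits large data into
# chunks so each stays under HDF5's object-header limit. A runnable sketch of
# just the splitting loop, with no h5py involved:
import numpy as np

HDF5_OBJECT_HEADER_LIMIT = 64512  # bytes, as in the snippet above

def split_for_hdf5(data):
    arr = np.asarray(data)
    num_chunks = 1
    chunks = np.array_split(arr, num_chunks)
    # Keep splitting until every chunk fits under the header limit.
    while any(chunk.nbytes > HDF5_OBJECT_HEADER_LIMIT for chunk in chunks):
        num_chunks += 1
        chunks = np.array_split(arr, num_chunks)
    return chunks

# 100k integers far exceed the limit, so they end up split across several sub-arrays.
print(len(split_for_hdf5(list(range(100_000)))))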
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
A = logging.get_logger(__name__)
# TODO: upload to AWS
A = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class __lowercase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = 'retribert'
def __init__( self , _UpperCAmelCase=30522 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=True , _UpperCAmelCase=128 , _UpperCAmelCase=0 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__a : Tuple = vocab_size
__a : Any = hidden_size
__a : Optional[Any] = num_hidden_layers
__a : List[str] = num_attention_heads
__a : str = hidden_act
__a : Dict = intermediate_size
__a : Union[str, Any] = hidden_dropout_prob
__a : str = attention_probs_dropout_prob
__a : List[str] = max_position_embeddings
__a : int = type_vocab_size
__a : Union[str, Any] = initializer_range
__a : str = layer_norm_eps
__a : List[Any] = share_encoders
__a : str = projection_dim | 160 |
def __lowercase ( _UpperCamelCase = 4000000 ) ->int:
"""simple docstring"""
lowercase : int = []
lowercase , lowercase : str = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(_UpperCamelCase )
lowercase , lowercase : Dict = b, a + b
return sum(_UpperCamelCase )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 337 | 0 |
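# The even-Fibonacci snippet above accumulates the matches in a list before
# summing; the list is unnecessary. A constant-memory sketch of the same
# Project Euler #2 computation:
def even_fib_sum(n=4_000_000):
    total, a, b = 0, 0, 1
    while b <= n:
        if b % 2 == 0:
            total += b
        a, b = b, a + b
    return total

assert even_fib_sum() == 4_613_732  # the known Project Euler #2 answer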
'''simple docstring'''
def UpperCamelCase_ ( A__ : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase_ : str = 0
for ch in input_str:
lowerCAmelCase_ : Dict = ord(_UpperCamelCase )
lowerCAmelCase_ : str = pow(2 , _UpperCamelCase )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 120 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__a = logging.get_logger(__name__)
__a = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = 'perceiver'
def __init__( self , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=1280 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=26 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="kv" , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=262 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=56 , SCREAMING_SNAKE_CASE__=[368, 496] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=1920 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=[1, 16, 224, 224] , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : Any = num_latents
lowercase : Union[str, Any] = d_latents
lowercase : str = d_model
lowercase : int = num_blocks
lowercase : str = num_self_attends_per_block
lowercase : List[str] = num_self_attention_heads
lowercase : List[str] = num_cross_attention_heads
lowercase : int = qk_channels
lowercase : List[Any] = v_channels
lowercase : int = cross_attention_shape_for_attention
lowercase : Tuple = self_attention_widening_factor
lowercase : Dict = cross_attention_widening_factor
lowercase : Any = hidden_act
lowercase : Optional[Any] = attention_probs_dropout_prob
lowercase : Union[str, Any] = initializer_range
lowercase : Any = layer_norm_eps
lowercase : Any = use_query_residual
# masked language modeling attributes
lowercase : List[str] = vocab_size
lowercase : Dict = max_position_embeddings
# image classification attributes
lowercase : int = image_size
# flow attributes
lowercase : List[Any] = train_size
# multimodal autoencoding attributes
lowercase : List[Any] = num_frames
lowercase : Union[str, Any] = audio_samples_per_frame
lowercase : int = samples_per_patch
lowercase : Optional[int] = output_shape
class __SCREAMING_SNAKE_CASE ( A__ ):
@property
def __lowerCamelCase ( self ):
if self.task == "multiple-choice":
lowercase : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase : Dict = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''inputs''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
@property
def __lowerCamelCase ( self ):
return 1E-4
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 3 , SCREAMING_SNAKE_CASE__ = 40 , SCREAMING_SNAKE_CASE__ = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase : str = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase : Union[str, Any] = preprocessor.num_special_tokens_to_add(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE__ )
# Generate dummy inputs according to compute batch and sequence
lowercase : Optional[Any] = [''' '''.join(['''a'''] ) * seq_length] * batch_size
lowercase : Any = dict(preprocessor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) )
lowercase : Union[str, Any] = inputs.pop('''input_ids''' )
return inputs
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase : List[str] = compute_effective_axis_dimension(SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch )
lowercase : List[str] = self._generate_dummy_images(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = dict(preprocessor(images=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) )
lowercase : Union[str, Any] = inputs.pop('''pixel_values''' )
return inputs
else:
raise ValueError(
'''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
| 337 | 0 |
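# The first snippet above tests whether a string has all-unique characters by
# turning each code point into a bit and checking for collisions. A short,
# readable restatement of the same bitmap trick:
def all_unique(s):
    bitmap = 0
    for ch in s:
        bit = 1 << ord(ch)
        if bitmap & bit:  # bit already set -> repeated character
            return False
        bitmap |= bit
    return True

assert all_unique("abc") is True   # bits 97, 98, 99 each set once
assert all_unique("aba") is False  # bit 97 would be set twice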
"""simple docstring"""
def lowercase_ ( ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = 0
for i in range(1 , 1001 ):
total += i**i
return str(_UpperCamelCase )[-10:]
if __name__ == "__main__":
print(solution()) | 135 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def __lowercase ( _UpperCamelCase = 8 ) ->str:
"""simple docstring"""
lowercase : List[str] = ascii_letters + digits + punctuation
return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) )
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->str:
"""simple docstring"""
i -= len(_UpperCamelCase )
lowercase : Dict = i // 3
lowercase : List[str] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
lowercase : Union[str, Any] = (
chars_incl
+ random(_UpperCamelCase, quotient + remainder )
+ random(_UpperCamelCase, _UpperCamelCase )
+ random(_UpperCamelCase, _UpperCamelCase )
)
lowercase : Union[str, Any] = list(_UpperCamelCase )
shuffle(_UpperCamelCase )
return "".join(_UpperCamelCase )
# random is a generalised function for letters, characters and numbers
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->str:
"""simple docstring"""
return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) )
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Dict:
"""simple docstring"""
pass # Put your code here...
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
pass # Put your code here...
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->List[Any]:
"""simple docstring"""
pass # Put your code here...
def __lowercase ( _UpperCamelCase, _UpperCamelCase = 8 ) ->bool:
"""simple docstring"""
if len(_UpperCamelCase ) < min_length:
# Your password must be at least 8 characters long
return False
lowercase : str = any(char in ascii_uppercase for char in password )
lowercase : List[str] = any(char in ascii_lowercase for char in password )
lowercase : Dict = any(char in digits for char in password )
lowercase : Tuple = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def __lowercase ( ) ->Dict:
"""simple docstring"""
lowercase : Union[str, Any] = int(input('''Please indicate the max length of your password: ''' ).strip() )
lowercase : Optional[Any] = input(
'''Please indicate the characters that must be in your password: ''' ).strip()
print('''Password generated:''', password_generator(_UpperCamelCase ) )
print(
'''Alternative Password generated:''', alternative_password_generator(_UpperCamelCase, _UpperCamelCase ), )
print('''[If you are thinking of using this password, you had better save it.]''' )
if __name__ == "__main__":
main()
| 337 | 0 |
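# The password generator above leaves three helpers as "Put your code here..."
# stubs. One plausible way to fill them -- an assumption, since the original
# exercise leaves the implementation to the reader -- is to sample from a
# restricted alphabet with the same secrets-based pattern used elsewhere:
import secrets
from string import ascii_letters, digits, punctuation

def random_letters_sketch(chars_incl, i):
    # Hypothetical helper: i random letters appended to the required characters.
    return chars_incl + "".join(secrets.choice(ascii_letters) for _ in range(i))

def random_number_sketch(chars_incl, i):
    return chars_incl + "".join(secrets.choice(digits) for _ in range(i))

def random_characters_sketch(chars_incl, i):
    return chars_incl + "".join(secrets.choice(punctuation) for _ in range(i))

print(random_letters_sketch("Xy", 6))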
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ : Union[str, Any] = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : str = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : int = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[int] = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
lowercase__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 264 |
from __future__ import annotations
__a = []
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->bool:
"""simple docstring"""
for i in range(len(_UpperCamelCase ) ):
if board[row][i] == 1:
return False
for i in range(len(_UpperCamelCase ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(_UpperCamelCase, -1, -1 ), range(_UpperCamelCase, -1, -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(_UpperCamelCase, -1, -1 ), range(_UpperCamelCase, len(_UpperCamelCase ) ) ):
if board[i][j] == 1:
return False
return True
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->bool:
"""simple docstring"""
if row >= len(_UpperCamelCase ):
solution.append(_UpperCamelCase )
printboard(_UpperCamelCase )
print()
return True
for i in range(len(_UpperCamelCase ) ):
if is_safe(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase ):
lowercase : int = 1
solve(_UpperCamelCase, row + 1 )
lowercase : Tuple = 0
return False
def __lowercase ( _UpperCamelCase ) ->None:
"""simple docstring"""
for i in range(len(_UpperCamelCase ) ):
for j in range(len(_UpperCamelCase ) ):
if board[i][j] == 1:
print('''Q''', end=''' ''' )
else:
print('''.''', end=''' ''' )
print()
# n=int(input("The no. of queens"))
__a = 8
__a = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
| 337 | 0 |
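# The N-queens solver above re-scans the board on every placement. An
# equivalent sketch that tracks attacked columns and diagonals in sets, making
# each safety check O(1): row - col is constant along one diagonal direction
# and row + col along the other.
def count_n_queens(n):
    cols, diag1, diag2 = set(), set(), set()

    def place(row):
        if row == n:
            return 1
        total = 0
        for col in range(n):
            if col in cols or (row - col) in diag1 or (row + col) in diag2:
                continue
            cols.add(col)
            diag1.add(row - col)
            diag2.add(row + col)
            total += place(row + 1)
            cols.remove(col)
            diag1.remove(row - col)
            diag2.remove(row + col)
        return total

    return place(0)

assert count_n_queens(8) == 92  # matches the 92 solutions the driver above prints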
from sklearn.metrics import matthews_corrcoef
import datasets
_lowerCamelCase : str = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
_lowerCamelCase : Tuple = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
"""
_lowerCamelCase : str = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32'''),
'''references''': datasets.Value('''int32'''),
}) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any]=None) ->Tuple:
'''simple docstring'''
return {
"matthews_correlation": float(matthews_corrcoef(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , sample_weight=SCREAMING_SNAKE_CASE__)),
}
| 14 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__a = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 337 | 0 |
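# The metric above delegates to sklearn's matthews_corrcoef. For the binary
# case the coefficient reduces to a closed form over the confusion matrix;
# a self-contained sketch:
import math

def binary_mcc(references, predictions):
    tp = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 1)
    tn = sum(1 for r, p in zip(references, predictions) if r == 0 and p == 0)
    fp = sum(1 for r, p in zip(references, predictions) if r == 0 and p == 1)
    fn = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 0)
    denom = math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return (tp * tn - fp * fn) / denom if denom else 0.0

# Perfect predictions give +1, fully inverted predictions give -1.
assert binary_mcc([1, 0, 1, 0], [1, 0, 1, 0]) == 1.0
assert binary_mcc([1, 0, 1, 0], [0, 1, 0, 1]) == -1.0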
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def lowerCamelCase_ ():
_UpperCAmelCase : Tuple = HfArgumentParser(_UpperCamelCase )
_UpperCAmelCase : List[str] = parser.parse_args_into_dataclasses()[0]
_UpperCAmelCase : Optional[int] = TensorFlowBenchmark(args=_UpperCamelCase )
try:
_UpperCAmelCase : Any = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
_UpperCAmelCase : Optional[int] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
_UpperCAmelCase : Any = ''' '''.join(str(_UpperCamelCase ).split(''' ''' )[:-1] )
_UpperCAmelCase : Any = ''''''
_UpperCAmelCase : str = eval(str(_UpperCamelCase ).split(''' ''' )[-1] )
_UpperCAmelCase : List[str] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
_UpperCAmelCase : Union[str, Any] = full_error_msg + begin_error_msg + str(_UpperCamelCase )
raise ValueError(_UpperCamelCase )
benchmark.run()
if __name__ == "__main__":
main()
| 263 |
from collections.abc import Callable
class __SCREAMING_SNAKE_CASE :
def __init__( self , SCREAMING_SNAKE_CASE__ = None ):
# Stores actual heap items.
lowercase : list = []
# Stores indexes of each item for supporting updates and deletion.
lowercase : dict = {}
# Stores current size of heap.
lowercase : str = 0
# Stores the function used to compute each item's score; the heap is
# ordered by that score.
lowercase : Tuple = key or (lambda SCREAMING_SNAKE_CASE__ : x)
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return int((i - 1) / 2 ) if i > 0 else None
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase , lowercase : Dict = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
lowercase , lowercase : int = self.arr[j], self.arr[i]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return self.arr[i][1] < self.arr[j][1]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : int = self._left(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = self._right(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = i
if left is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = left
if right is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = right
return valid_parent
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[int] = self._parent(SCREAMING_SNAKE_CASE__ )
while parent is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self._swap(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase , lowercase : Optional[int] = parent, self._parent(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = self._get_valid_parent(SCREAMING_SNAKE_CASE__ )
while valid_parent != index:
self._swap(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase , lowercase : str = valid_parent, self._get_valid_parent(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if item not in self.pos_map:
return
lowercase : str = self.pos_map[item]
lowercase : Optional[int] = [item, self.key(SCREAMING_SNAKE_CASE__ )]
# Make sure the heap property holds in both the up and down directions.
# Ideally only one of the two calls will make any change.
self._heapify_up(SCREAMING_SNAKE_CASE__ )
self._heapify_down(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
if item not in self.pos_map:
return
lowercase : List[str] = self.pos_map[item]
del self.pos_map[item]
lowercase : Optional[int] = self.arr[self.size - 1]
lowercase : int = index
self.size -= 1
# Make sure the heap property holds in both the up and down directions. Ideally only one
# of them will make any change, so there is no performance loss in calling both.
if self.size > index:
self._heapify_up(SCREAMING_SNAKE_CASE__ )
self._heapify_down(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : str = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(SCREAMING_SNAKE_CASE__ )] )
else:
lowercase : int = [item, self.key(SCREAMING_SNAKE_CASE__ )]
lowercase : str = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowerCamelCase ( self ):
return self.arr[0] if self.size else None
def __lowerCamelCase ( self ):
lowercase : str = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def __lowercase ( ) ->None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 0 |
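# The heap above pairs the array with a position map so updates and deletions
# cost O(log n) instead of an O(n) scan. A tiny sketch of the core bookkeeping:
# every swap must update both the array and the item -> index map.
class MinHeapSketch:
    def __init__(self):
        self.arr, self.pos = [], {}

    def _swap(self, i, j):
        # Record the post-swap indexes first, then swap the items themselves.
        self.pos[self.arr[i]], self.pos[self.arr[j]] = j, i
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def push(self, item):
        self.arr.append(item)
        self.pos[item] = len(self.arr) - 1
        i = len(self.arr) - 1
        while i > 0 and self.arr[(i - 1) // 2] > self.arr[i]:  # sift up
            self._swap(i, (i - 1) // 2)
            i = (i - 1) // 2

h = MinHeapSketch()
for x in (5, 3, 8, 1):
    h.push(x)
assert h.arr[0] == 1 and h.pos[1] == 0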
def a ( _UpperCAmelCase : Optional[Any] = 4_00_00_00 ):
'''simple docstring'''
__UpperCAmelCase : int = []
__UpperCAmelCase : str = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(_UpperCamelCase )
__UpperCAmelCase : Dict = b, a + b
return sum(_UpperCamelCase )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 226 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 337 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class a_ :
def __init__( self : List[str] , lowercase : Optional[int] , lowercase : Optional[int]=13 , lowercase : Tuple=7 , lowercase : List[Any]=True , lowercase : Any=True , lowercase : Dict=True , lowercase : int=True , lowercase : int=99 , lowercase : str=32 , lowercase : Union[str, Any]=2 , lowercase : List[str]=4 , lowercase : Optional[Any]=37 , lowercase : Union[str, Any]="gelu" , lowercase : List[Any]=0.1 , lowercase : Dict=0.1 , lowercase : Any=512 , lowercase : Any=16 , lowercase : Optional[Any]=2 , lowercase : str=0.02 , lowercase : Optional[Any]=3 , lowercase : Dict=4 , lowercase : Dict=None , lowercase : Any=0 , ):
"""simple docstring"""
lowercase_ :List[Any] = parent
lowercase_ :str = batch_size
lowercase_ :Optional[int] = seq_length
lowercase_ :Union[str, Any] = is_training
lowercase_ :Optional[Any] = use_input_mask
lowercase_ :str = use_token_type_ids
lowercase_ :List[Any] = use_labels
lowercase_ :List[Any] = vocab_size
lowercase_ :Optional[Any] = hidden_size
lowercase_ :Union[str, Any] = num_hidden_layers
lowercase_ :Optional[Any] = num_attention_heads
lowercase_ :int = intermediate_size
lowercase_ :Union[str, Any] = hidden_act
lowercase_ :Optional[Any] = hidden_dropout_prob
lowercase_ :Optional[int] = attention_probs_dropout_prob
lowercase_ :List[Any] = max_position_embeddings
lowercase_ :Any = type_vocab_size
lowercase_ :Tuple = type_sequence_label_size
lowercase_ :int = initializer_range
lowercase_ :Any = num_labels
lowercase_ :Optional[int] = num_choices
lowercase_ :int = scope
lowercase_ :str = projection_dim
def lowercase__ ( self : Dict ):
"""simple docstring"""
lowercase_ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ :List[Any] = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
lowercase_ :Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ :int = None
if self.use_token_type_ids:
lowercase_ :Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ :Optional[Any] = None
lowercase_ :int = None
lowercase_ :Any = None
if self.use_labels:
lowercase_ :Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ :Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ :List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ :str = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , )
lowercase_ :Optional[int] = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : List[str] , lowercase : Union[str, Any] , lowercase : Optional[Any] , lowercase : str , lowercase : List[str] , lowercase : Dict , lowercase : str , lowercase : int ):
"""simple docstring"""
lowercase_ :Union[str, Any] = TFDPRContextEncoder(config=SCREAMING_SNAKE_CASE__ )
lowercase_ :List[str] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
lowercase_ :List[str] = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
lowercase_ :Dict = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowercase__ ( self : Any , lowercase : int , lowercase : List[str] , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : Dict , lowercase : Dict , lowercase : Optional[Any] ):
"""simple docstring"""
lowercase_ :str = TFDPRQuestionEncoder(config=SCREAMING_SNAKE_CASE__ )
lowercase_ :List[Any] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
lowercase_ :Any = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
lowercase_ :int = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowercase__ ( self : Dict , lowercase : List[str] , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Union[str, Any] ):
"""simple docstring"""
lowercase_ :List[str] = TFDPRReader(config=SCREAMING_SNAKE_CASE__ )
lowercase_ :List[str] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def lowercase__ ( self : List[str] ):
"""simple docstring"""
lowercase_ :Union[str, Any] = self.prepare_config_and_inputs()
lowercase_ :int = config_and_inputs
lowercase_ :int = {'''input_ids''': input_ids}
return config, inputs_dict
@require_tf
class a_ ( A__ , A__ , unittest.TestCase ):
__A = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
__A = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
__A = False
__A = False
__A = False
__A = False
__A = False
def lowercase__ ( self : Optional[Any] ):
"""simple docstring"""
lowercase_ :Any = TFDPRModelTester(self )
lowercase_ :Tuple = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def lowercase__ ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : str ):
"""simple docstring"""
lowercase_ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*SCREAMING_SNAKE_CASE__ )
def lowercase__ ( self : List[Any] ):
"""simple docstring"""
lowercase_ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*SCREAMING_SNAKE_CASE__ )
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
lowercase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*SCREAMING_SNAKE_CASE__ )
@slow
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ :List[str] = TFDPRContextEncoder.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ :Union[str, Any] = TFDPRContextEncoder.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ :str = TFDPRQuestionEncoder.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ :Tuple = TFDPRReader.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@require_tf
class a_ ( unittest.TestCase ):
@slow
def lowercase__ ( self : List[str] ):
"""simple docstring"""
lowercase_ :List[Any] = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base" )
lowercase_ :int = tf.constant(
[[101, 7_592, 1_010, 2_003, 2_026, 3_899, 10_140, 1_029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
lowercase_ :Tuple = model(SCREAMING_SNAKE_CASE__ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
lowercase_ :Optional[Any] = tf.constant(
[
[
0.03_23_62_53,
0.12_75_33_35,
0.16_81_85_09,
0.00_27_97_86,
0.3_89_69_33,
0.24_26_49_45,
0.2_17_89_71,
-0.02_33_52_27,
-0.08_48_19_59,
-0.14_32_41_17,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 223 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__a = logging.get_logger(__name__)
__a = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__a = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
__a = {'''facebook/blenderbot-3B''': 1_28}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Dict = VOCAB_FILES_NAMES
A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Optional[int] = ['input_ids', 'attention_mask']
A : str = BlenderbotTokenizer
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="replace" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowercase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
lowercase : List[Any] = getattr(SCREAMING_SNAKE_CASE__ , pre_tok_state.pop('''type''' ) )
lowercase : str = add_prefix_space
lowercase : List[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = add_prefix_space
lowercase : str = '''post_processor'''
lowercase : str = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if tokenizer_component_instance:
lowercase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase : Tuple = tuple(state['''sep'''] )
if "cls" in state:
lowercase : Union[str, Any] = tuple(state['''cls'''] )
lowercase : Optional[int] = False
if state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
lowercase : Any = add_prefix_space
lowercase : Tuple = True
if state.get('''trim_offsets''' , SCREAMING_SNAKE_CASE__ ) != trim_offsets:
lowercase : List[str] = trim_offsets
lowercase : Optional[int] = True
if changes_to_apply:
lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ , state.pop('''type''' ) )
lowercase : Union[str, Any] = component_class(**SCREAMING_SNAKE_CASE__ )
setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __lowerCamelCase ( self ):
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else value
lowercase : Any = value
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : Any = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : int = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : Tuple = [self.sep_token_id]
lowercase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
return token_ids_a + [self.eos_token_id]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = []
for is_user, text in conversation.iter_texts():
if is_user:
# User turns need a leading space, matching what Blenderbot does internally
inputs.append(''' ''' + text )
else:
# Generated responses already carry the leading space.
inputs.append(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = ''' '''.join(SCREAMING_SNAKE_CASE__ )
lowercase : Any = self.encode(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > self.model_max_length:
lowercase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
| 337 | 0 |
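# The tokenizer's conversation builder above space-prefixes user turns, joins
# everything into one string, and keeps only the last model_max_length tokens.
# The same truncate-from-the-left pattern in isolation; str.split stands in for
# the real tokenizer here, which is an assumption for illustration.
def build_conversation_input(turns, max_length):
    parts = [" " + text if is_user else text for is_user, text in turns]
    tokens = " ".join(parts).split()  # stand-in for tokenizer.encode
    return tokens[-max_length:]

print(build_conversation_input([(True, "hello there"), (False, "hi!")], 3))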
'''simple docstring'''
def __UpperCamelCase ( lowercase__ : List[Any], lowercase__ : str ):
'''simple docstring'''
assert x is not None
assert y is not None
__lowercase =len(_UpperCamelCase )
__lowercase =len(_UpperCamelCase )
# declaring the array for storing the dp values
__lowercase =[[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1, m + 1 ):
for j in range(1, n + 1 ):
__lowercase =1 if x[i - 1] == y[j - 1] else 0
__lowercase =max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match )
__lowercase =''''''
__lowercase =m, n
while i > 0 and j > 0:
__lowercase =1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
__lowercase =x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
UpperCAmelCase = '''AGGTAB'''
UpperCAmelCase = '''GXTXAYB'''
UpperCAmelCase = 4
UpperCAmelCase = '''GTAB'''
UpperCAmelCase , UpperCAmelCase = longest_common_subsequence(a, b)
print('''len =''', ln, ''', sub-sequence =''', subseq)
import doctest
doctest.testmod()
| 141 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def __lowercase ( ) ->int:
"""simple docstring"""
lowercase : Tuple = HfArgumentParser(_UpperCamelCase )
lowercase : List[str] = parser.parse_args_into_dataclasses()[0]
lowercase : Optional[int] = TensorFlowBenchmark(args=_UpperCamelCase )
try:
lowercase : Any = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowercase : Optional[int] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
lowercase : Any = ''' '''.join(str(_UpperCamelCase ).split(''' ''' )[:-1] )
lowercase : Any = ''''''
lowercase : str = eval(str(_UpperCamelCase ).split(''' ''' )[-1] )
lowercase : List[str] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
lowercase : Union[str, Any] = full_error_msg + begin_error_msg + str(_UpperCamelCase )
raise ValueError(_UpperCamelCase )
benchmark.run()
if __name__ == "__main__":
main()
| 337 | 0 |
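# longest_common_subsequence above fills the full m x n table so the actual
# subsequence can be traced back. When only the length is needed, two rows of
# the table suffice; a sketch:
def lcs_length(x, y):
    prev = [0] * (len(y) + 1)
    for xi in x:
        curr = [0]
        for j, yj in enumerate(y, start=1):
            curr.append(prev[j - 1] + 1 if xi == yj else max(prev[j], curr[j - 1]))
        prev = curr
    return prev[-1]

assert lcs_length("AGGTAB", "GXTXAYB") == 4  # "GTAB", as in the driver above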
'''simple docstring'''
from __future__ import annotations
def lowercase ( __magic_name__ ):
'''simple docstring'''
if len(_UpperCamelCase ) == 0:
return array
UpperCAmelCase : Union[str, Any] = min(_UpperCamelCase ), max(_UpperCamelCase )
# Compute the variables
UpperCAmelCase : Any = _max - _min + 1
UpperCAmelCase : Optional[Any] = [0] * holes_range, [0] * holes_range
# Make the sorting.
for i in array:
UpperCAmelCase : Dict = i - _min
UpperCAmelCase : List[Any] = i
holes_repeat[index] += 1
# Makes the array back by replacing the numbers.
UpperCAmelCase : Any = 0
for i in range(_UpperCamelCase ):
while holes_repeat[i] > 0:
UpperCAmelCase : Any = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
a : Optional[int] = input("Enter numbers separated by comma:\n")
a : Optional[Any] = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
| 311 |
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = [False] * len(_UpperCamelCase )
lowercase : Optional[int] = []
queue.append(_UpperCamelCase )
lowercase : Union[str, Any] = True
while queue:
lowercase : List[str] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_UpperCamelCase )
lowercase : Tuple = True
lowercase : Optional[Any] = u
return visited[t]
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->List[str]:
"""simple docstring"""
lowercase : List[str] = [-1] * (len(_UpperCamelCase ))
lowercase : int = 0
while bfs(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ):
lowercase : List[str] = float('''Inf''' )
lowercase : int = sink
while s != source:
# Find the minimum value in select path
lowercase : List[Any] = min(_UpperCamelCase, graph[parent[s]][s] )
lowercase : Union[str, Any] = parent[s]
max_flow += path_flow
lowercase : Optional[int] = sink
while v != source:
lowercase : Any = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowercase : Union[str, Any] = parent[v]
return max_flow
__a = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
__a , __a = 0, 5
print(ford_fulkerson(graph, source, sink))
| 337 | 0 |
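# The Ford-Fulkerson code above picks augmenting paths with BFS, i.e. the
# Edmonds-Karp variant (O(V * E^2)). A compact runnable restatement of the
# same algorithm, checked against the 6-node example graph above:
from collections import deque

def max_flow(capacity, source, sink):
    n, flow = len(capacity), 0
    graph = [row[:] for row in capacity]  # residual capacities
    while True:
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:  # BFS for a shortest augmenting path
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and graph[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path left
            return flow
        # Find the bottleneck along the path, then push that much flow.
        bottleneck, v = float("inf"), sink
        while v != source:
            bottleneck = min(bottleneck, graph[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:
            graph[parent[v]][v] -= bottleneck
            graph[v][parent[v]] += bottleneck
            v = parent[v]
        flow += bottleneck

assert max_flow(
    [[0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0],
     [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0]],
    0, 5,
) == 23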
"""simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
_A = {
# 1536-bit
5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 2048-bit
14: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AACAA68FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 3072-bit
15: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 4096-bit
16: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"""
+ """FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 6144-bit
17: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"""
+ """8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"""
+ """302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"""
+ """A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"""
+ """49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"""
+ """FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"""
+ """180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"""
+ """3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"""
+ """04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"""
+ """B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"""
+ """1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"""
+ """E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"""
+ """99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"""
+ """04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"""
+ """233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"""
+ """D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"""
+ """AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"""
+ """DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"""
+ """2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"""
+ """F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"""
+ """BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"""
+ """B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"""
+ """387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"""
+ """6DCC4024FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 8192-bit
18: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"""
+ """F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"""
+ """179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"""
+ """DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"""
+ """5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"""
+ """D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"""
+ """23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"""
+ """06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"""
+ """DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"""
+ """12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"""
+ """38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"""
+ """741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"""
+ """3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"""
+ """22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"""
+ """4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"""
+ """062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"""
+ """4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"""
+ """B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"""
+ """4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"""
+ """9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"""
+ """60C980DD98EDD3DFFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
}
class DiffieHellman:
    """Implements the Diffie-Hellman key exchange over a fixed MODP group."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # Check that 2 <= key <= p - 2 and that the key is a quadratic residue
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(
        local_private_key_str: str, remote_public_key_str: str, group: int = 14
    ) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
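    # A minimal usage sketch (not part of the original module): two parties
    # exchange public keys and derive the same shared secret.
    alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
    alice_secret = alice.generate_shared_key(bob.generate_public_key())
    bob_secret = bob.generate_shared_key(alice.generate_public_key())
    assert alice_secret == bob_secret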
| 171 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """Decorator that registers the decorated function as a handler for `key`."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    """Decorator that registers the decorated function for several keys at once."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected method if it exists, else returns None."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Attaches the KeyHandler metaclass to the given class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
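# A minimal usage sketch (assumed, not part of the original module): bind a
# method to a key with `mark` and attach the dispatch machinery with `register`.
# `KEYMAP["up"]` is assumed to map the up-arrow key to its key code.
@register
class ExampleMenu:
    @mark(KEYMAP["up"])
    def move_up(cls):
        return "moved up"


# ExampleMenu.handle_input() reads one key press and calls the matching handler.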
| 337 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """An adapter to assist with logging in multiprocess environments."""

    @staticmethod
    def _should_log(main_process_only):
        # Log either everywhere, or only when this is the main process
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    """Returns a `MultiProcessAdapter` wrapping the named stdlib logger."""
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
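# A minimal usage sketch (assumed): inside a running Accelerate program, the
# adapter can restrict logs to the main process or emit them in rank order,
# using the `main_process_only` and `in_order` keyword arguments handled above.
#
# logger = get_logger(__name__, log_level="INFO")
# logger.info("Logged once, on the main process")
# logger.info("Logged by every process, in order", main_process_only=False, in_order=True)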
| 337 | 0 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1


class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)

    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))

    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True, weights_map=model.state_dict())

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
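# A minimal sketch (assumed, not part of the test file) of the pattern these
# tests exercise: attach a custom hook so every input is shifted before forward.
#
# model = ModelForTest()
# add_hook_to_module(model, PreForwardHook())
# y = model(torch.randn(2, 3))  # equivalent to calling the bare model on x + 1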
| 120 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because Lightning requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
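# Example invocation (the script file name and paths are placeholders, not
# taken from the original source):
# python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#     --longformer_model longformer-base-4096 \
#     --longformer_question_answering_ckpt_path ./checkpoint.ckpt \
#     --pytorch_dump_folder_path ./longformer_qa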
| 337 | 0 |
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
_DESCRIPTION = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
_KWARGS_DESCRIPTION = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
} | 135 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
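# A minimal usage sketch (assumed): instantiating the default configuration,
# which would then typically be passed to a YOLOS model class.
#
# from transformers import YolosConfig, YolosModel
# configuration = YolosConfig()
# model = YolosModel(configuration)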
| 337 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None


@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 264 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__a = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Checks that an installed package satisfies a pip-style requirement string."""
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
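# Example checks (the requirement strings are illustrative):
# require_version("numpy>=1.17,<2.0")   # versioned range check
# require_version("tokenizers==0.9.4")  # exact pin
# require_version_core("protobuf")      # presence-only check with the core hint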
| 337 | 0 |