code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 82–54.1k | int64 0–699 | stringlengths 111–35.6k | int64 0–699 | int64 0–1
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}

class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
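
For orientation, a minimal usage sketch of the pair defined above; it only assumes the two classes in this file and their default arguments:

config = CamembertConfig()  # defaults: vocab_size=30522, hidden_size=768, ...
onnx_config = CamembertOnnxConfig(config)
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])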
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}

class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

CUSTOM_DPR_READER_DOCSTRING = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '

@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
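
A hedged sketch of the reader tokenizer's intended call pattern (the question and passage strings are invented; the checkpoint name is the standard facebook/dpr-reader-single-nq-base):

from transformers import DPRReader, DPRReaderTokenizer

tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")

# One question against two (title, text) passages; input_ids has shape (n_passages, sequence_length).
encoded_inputs = tokenizer(
    questions="What is love?",
    titles=["Haddaway", "What Is Love (song)"],
    texts=["Haddaway is a German singer...", "The song was released in 1993..."],
    padding=True,
    return_tensors="pt",
)
outputs = model(**encoded_inputs)
# Rank answer spans across passages from the reader's start/end/relevance logits.
best_spans = tokenizer.decode_best_spans(encoded_inputs, outputs, num_spans=3)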
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]

def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample, when applied in the main path of residual blocks.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
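
# A quick sanity check of the behaviour above (illustrative only): in eval mode
# drop_path is the identity, while in training mode entire samples are zeroed
# with probability drop_prob and survivors are rescaled by 1 / keep_prob so the
# expected activation is unchanged:
#
#     x = torch.ones(4, 3, 8, 8)
#     assert torch.equal(drop_path(x, drop_prob=0.2, training=False), x)
#     y = drop_path(x, drop_prob=0.2, training=True)  # per-sample 0 or 1 / 0.8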

class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)

class PoolFormerEmbeddings(nn.Module):
    """
    Construct Patch Embeddings.
    """

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings

class PoolFormerGroupNorm(nn.GroupNorm):
    """
    Group Normalization with 1 group. Input: tensor of shape [B, C, *].
    """

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)

class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
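
# Note that the token mixer above returns pool(x) - x rather than pool(x): the
# residual connection in PoolFormerLayer adds the normalized input back, so
# (ignoring drop_path) the block computes x + (pool(norm(x)) - norm(x)), i.e.
# plain average pooling stands in for attention.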

class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states

class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs

class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)

class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            self.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
POOLFORMER_INPUTS_DOCSTRING = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'

@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )

class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output

@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
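
A hedged end-to-end sketch of the classification model above, using the sail/poolformer_s12 checkpoint named in the docstring constants (the image path is illustrative):

import torch
from PIL import Image
from transformers import AutoImageProcessor, PoolFormerForImageClassification

processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")

image = Image.open("cat.png")  # any RGB image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"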
from typing import Any
import numpy as np

def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of a Hermitian matrix `a` and a vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])

    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
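
In the notation of the functions above, rayleigh_quotient(a, v) evaluates, for a Hermitian matrix $A$ and a nonzero vector $v$,

$$R(A, v) = \frac{v^{*} A v}{v^{*} v},$$

which is always real for Hermitian $A$ and is bounded by the extreme eigenvalues, $\lambda_{\min} \le R(A, v) \le \lambda_{\max}$; the second test asserts the value $3$ for the given real symmetric matrix and vector.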
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
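
A brief sketch of what the _LazyModule registration above buys (assuming the standard transformers package layout):

# Importing the package is cheap: only _import_structure is read at this point.
import transformers.models.bigbird_pegasus as bigbird_pegasus

# torch and the modeling file are pulled in lazily, on first attribute access.
model_cls = bigbird_pegasus.BigBirdPegasusForConditionalGeneration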
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right

class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """The heuristic here is the Manhattan distance to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        """Search for the path; if no path is found, only the start position is returned."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns the valid neighbours of `parent` (inside the grid and not obstacles)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from parent to parent until the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
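
A small check of the ordering behind open_nodes.sort() (values chosen for illustration): nodes compare by f_cost, which here is the Manhattan heuristic alone, so the frontier is expanded greedily by estimated distance to the goal rather than by g_cost + h as in A*.

near = Node(pos_x=5, pos_y=5, goal_x=6, goal_y=6, g_cost=9, parent=None)
far = Node(pos_x=0, pos_y=0, goal_x=6, goal_y=6, g_cost=1, parent=None)
assert near < far  # |6-5| + |6-5| = 2 beats |6-0| + |6-0| = 12; g_cost is ignored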
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """
        Serializes this instance while replacing `GenerationConfig` instances by dictionaries (for JSON
        serialization support).
        """
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
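
A hedged example of constructing these arguments for evaluation-time generation (output_dir and the numeric values are illustrative):

from transformers import Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir="out",
    predict_with_generate=True,  # compute ROUGE/BLEU from generated sequences
    generation_max_length=128,   # falls back to the model config when None
    generation_num_beams=4,
)
print(args.to_dict()["generation_max_length"])  # 128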
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)

class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
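
A brief sketch of the resulting auto classes in use (bert-base-cased is a standard checkpoint whose model_type maps to FlaxBertModel in FLAX_MODEL_MAPPING_NAMES above):

from transformers import FlaxAutoModel

model = FlaxAutoModel.from_pretrained("bert-base-cased")  # dispatches to FlaxBertModel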
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
    from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __A , __A=13 , __A=32 , __A=3 , __A=4 , __A=[10, 20, 30, 40] , __A=[2, 2, 3, 2] , __A=True , __A=True , __A=37 , __A="gelu" , __A=10 , __A=0.02 , __A=["stage2", "stage3", "stage4"] , __A=[2, 3, 4] , __A=None , ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = parent
lowerCamelCase : Union[str, Any] = batch_size
lowerCamelCase : Dict = image_size
lowerCamelCase : Optional[Any] = num_channels
lowerCamelCase : Optional[Any] = num_stages
lowerCamelCase : Union[str, Any] = hidden_sizes
lowerCamelCase : Union[str, Any] = depths
lowerCamelCase : Dict = is_training
lowerCamelCase : Tuple = use_labels
lowerCamelCase : Optional[int] = intermediate_size
lowerCamelCase : str = hidden_act
lowerCamelCase : Tuple = num_labels
lowerCamelCase : Dict = initializer_range
lowerCamelCase : List[Any] = out_features
lowerCamelCase : List[str] = out_indices
lowerCamelCase : Tuple = scope
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Dict = None
if self.use_labels:
lowerCamelCase : str = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Dict = self.get_config()
return config, pixel_values, labels
def _snake_case ( self ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _snake_case ( self , __A , __A , __A ):
"""simple docstring"""
lowerCamelCase : Dict = ConvNextModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase : Optional[Any] = model(__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _snake_case ( self , __A , __A , __A ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = ConvNextForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase : str = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self , __A , __A , __A ):
"""simple docstring"""
lowerCamelCase : Tuple = ConvNextBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase : Optional[int] = model(__lowerCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase : List[Any] = None
lowerCamelCase : str = ConvNextBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase : Tuple = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Dict = config_and_inputs
lowerCamelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
__A : Optional[Any] = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
__A : List[Any] = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
__A : int = True
__A : List[str] = False
__A : Optional[int] = False
__A : Dict = False
__A : int = False
    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 340 |
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
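

# Illustrative usage sketch (assumes the reconstructed `kinetic_energy`
# signature above): the sign of the velocity is irrelevant because only
# abs(velocity) ** 2 enters the formula.
def _demo_kinetic_energy() -> None:
    assert kinetic_energy(10, 10) == 500.0
    assert kinetic_energy(10, -10) == 500.0
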
if __name__ == "__main__":
import doctest
    doctest.testmod(verbose=True)
| 16 | 0 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)

LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.

"""


class FlaxLogitsProcessor:
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores


class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)
        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]
        return next_scores


class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)
        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
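

# Illustrative sketch (assumes the reconstructed warper classes above): keep
# only the 2 highest-scoring tokens per row; every other logit becomes -inf.
def _demo_top_k_warper() -> None:
    scores = jnp.array([[1.0, 5.0, 3.0, 2.0]])
    warper = FlaxTopKLogitsWarper(top_k=2)
    out = warper(input_ids=None, scores=scores, cur_len=1)
    # only indices 1 and 2 (scores 5.0 and 3.0) keep finite values
    assert bool(jnp.isfinite(out[0, 1])) and bool(jnp.isfinite(out[0, 2]))
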
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores


class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores
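

# Illustrative sketch (assumes the reconstruction above): before `min_length`
# tokens have been generated, the EOS logit is forced to -inf so generation
# cannot stop early.
def _demo_min_length_processor() -> None:
    scores = jnp.zeros((1, 4))
    processor = FlaxMinLengthLogitsProcessor(min_length=5, eos_token_id=3)
    out = processor(input_ids=None, scores=scores, cur_len=2)
    assert bool(out[0, 3] == -float("inf"))
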
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|>
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )
            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)
        return scores
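

# Illustrative sketch (assumes the reconstructions above): processors compose
# through FlaxLogitsProcessorList exactly like a function pipeline.
def _demo_processor_list() -> None:
    processors = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(temperature=0.5), FlaxTopKLogitsWarper(top_k=2)]
    )
    scores = jnp.array([[1.0, 5.0, 3.0, 2.0]])
    out = processors(input_ids=None, scores=scores, cur_len=1)
    assert out.shape == scores.shape
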
| 89 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )
    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 16 | 0 |
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand

SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 1_4]),
('2H 5D 3C AS 5S', False, [1_4, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 2_3),
('JH 9H TH KH QH', 2_2),
('JC KH JS JD JH', 2_1),
('KH KC 3S 3H 3D', 2_0),
('8C 9C 5C 3C TC', 1_9),
('JS QS 9H TS KH', 1_8),
('7C 7S KH 2H 7H', 1_7),
('3C KH 5D 5S KH', 1_6),
('QH 8H KD JH 8S', 1_5),
('2D 6D 9D TH 7D', 1_4),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
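

# Illustrative sketch (assumes PokerHand from sol1, as imported above): hands
# compare by hand type first, then by card values, returning "Win"/"Loss"/"Tie".
def _demo_poker_compare() -> None:
    royal_flush = PokerHand("JH AH TH KH QH")
    pair = PokerHand("KS 8D 4D 9S 4S")
    assert royal_flush.compare_with(pair) == "Win"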
| 259 |
from collections.abc import Callable
import numpy as np
def heun_method(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    # Heun's (improved Euler) method: predict with an Euler step, then
    # correct with the trapezoidal average of the two slopes.
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
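

# Illustrative usage sketch (assumes the reconstructed `heun_method` above):
# integrate dy/dx = y from x = 0 to x = 1; the last entry approximates e.
def _demo_heun() -> None:
    y = heun_method(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
    assert abs(y[-1] - 2.718) < 1e-2
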
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 16 | 0 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'abeja/gpt-neox-japanese-2.7b': 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace(" ", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
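

# Illustrative sketch (assumes the reconstruction above; the tiny vocab and
# emoji tables here are made up, not the real GPT-NeoX-Japanese assets):
# out-of-vocabulary characters fall back to <|byte..|> tokens.
def _demo_byte_fallback() -> None:
    tok = SubWordJapaneseTokenizer(
        vocab={"<SP>": 0}, ids_to_tokens={0: ["<SP>"]}, emoji={"emoji": {}, "emoji_inv": {}}
    )
    assert tok.tokenize("a") == ["<|byte97|>"]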
| 659 |
def sum_of_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
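

# Illustrative usage sketch (`sum_of_divisors` is the reconstructed name of
# the obfuscated function above): 28 is a perfect number, so its proper
# divisors sum back to 28.
def _demo_sum_of_divisors() -> None:
    assert sum_of_divisors(28) == 1 + 2 + 4 + 7 + 14
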
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 16 | 0 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
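

# Illustrative sketch (assumes transformers' public scheduler API as imported
# above): a linear warmup/decay schedule attached to a throwaway optimizer.
def _demo_linear_schedule() -> None:
    model = nn.Linear(2, 2)
    optimizer = AdamW(model.parameters(), lr=10.0)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=2, num_training_steps=10
    )
    lrs = unwrap_schedule(scheduler, num_steps=10)
    assert lrs[0] == 0.0 and lrs[2] == 10.0  # warmup peaks at step 2
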
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 579 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def __a ( A__ : str = "github" ):
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
SCREAMING_SNAKE_CASE = InstagramUser(A__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , A__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(f'{instagram_user.number_of_posts = }')
print(f'{instagram_user.number_of_followers = }')
print(f'{instagram_user.number_of_followings = }')
print(f'{instagram_user.email = }')
print(f'{instagram_user.website = }')
print(f'{instagram_user.profile_picture_url = }')
print(f'{instagram_user.is_verified = }')
    print(f'{instagram_user.is_private = }')
| 16 | 0 |
"""simple docstring"""
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return a list of all primes up to and including num."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase: Tuple =int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 607 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
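

# Illustrative usage sketch (assumes the reconstructions above): get_pairs
# yields the set of adjacent symbol bigrams that BPE merges are ranked over.
def _demo_get_pairs() -> None:
    assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}
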
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
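    # Sketch: for a single 3-token sequence the mask is [1, 0, 0, 0, 1]
    # (1 marks the added special tokens, 0 the sequence tokens); for a pair it
    # becomes [1, 0..0, 1, 1, 0..0, 1], matching <s> A </s></s> B </s>.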
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids( self , conversation: "Conversation" ):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to prefix with a space, as is done within Blenderbot
                inputs.append(" " + text )
            else:
                # Generated responses should already contain the space prefix.
                inputs.append(text )
        full_string = " ".join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
        return input_ids | 16 | 0 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests( unittest.TestCase ):
    def test_swish( self ):
        act = get_activation('''swish''' )
        self.assertIsInstance(act , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_silu( self ):
        act = get_activation('''silu''' )
        self.assertIsInstance(act , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_mish( self ):
        act = get_activation('''mish''' )
        self.assertIsInstance(act , nn.Mish )
        self.assertEqual(act(torch.tensor(-200 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_gelu( self ):
        act = get_activation('''gelu''' )
        self.assertIsInstance(act , nn.GELU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
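    # Minimal usage sketch (assuming the imported `get_activation` helper):
    #
    #   act = get_activation("silu")
    #   y = act(torch.linspace(-5.0, 5.0, steps=11))
    #
    # SiLU/Swish computes x * sigmoid(x), so it is ~0 for large negative inputs
    # and ~x for large positive inputs, exactly the boundary behaviour the
    # assertions above check.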
| 175 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links( workflow_run_id , token=None ):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
    url = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url , headers=headers ).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F"&page={i + 2}" , headers=headers ).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        return job_links
    except Exception:
        print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
        return {}
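# Pagination sketch: the first request returns up to 100 jobs plus `total_count`;
# with e.g. total_count = 250 the loop above fetches ceil((250 - 100) / 100) = 2
# extra pages (&page=2 and &page=3).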
def get_artifacts_links( workflow_run_id , token=None ):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
    url = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url , headers=headers ).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F"&page={i + 2}" , headers=headers ).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
        return artifacts
    except Exception:
        print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
        return {}
def download_artifact( artifact_name , artifact_url , output_dir , token ):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
    result = requests.get(artifact_url , headers=headers , allow_redirects=False )
    download_url = result.headers["Location"]
    response = requests.get(download_url , allow_redirects=True )
    file_path = os.path.join(output_dir , F"{artifact_name}.zip" )
    with open(file_path , "wb" ) as fp:
        fp.write(response.content )
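# Note (behaviour sketch): GitHub answers the artifact endpoint with a redirect
# whose `Location` header is a short-lived signed URL, so the first request
# disables redirects just to read that header and the second one downloads the zip.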
def get_errors_from_single_artifact( artifact_zip_path , job_links=None ):
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path ) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename ):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename ) as f:
                        for line in f:
                            line = line.decode("UTF-8" ).strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": " )]
                                    error = line[line.index(": " ) + len(": " ) :]
                                    errors.append([error_line, error] )
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED " ):
                                # `test` is the test method that failed
                                test = line[len("FAILED " ) :]
                                failed_tests.append(test )
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors ) != len(failed_tests ):
        raise ValueError(
            F"`errors` and `failed_tests` should have the same number of elements. Got {len(errors )} for `errors` "
            F"and {len(failed_tests )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem." )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name , None )
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors , failed_tests )]
    return result
def get_all_errors( artifact_dir , job_links=None ):
    errors = []
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if p.endswith(".zip" )]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p , job_links=job_links ) )
    return errors
def reduce_by_error( logs , error_filter=None ):
    counter = Counter()
    counter.update([x[1] for x in logs] )
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
    return r
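# Output sketch (hypothetical values):
#   {"AssertionError: tensors differ": {"count": 7, "failed_tests": [(test, error_line), ...]}, ...}
# with the most frequent error first thanks to the descending sort above.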
def get_model( test ):
    test = test.split("::" )[0]
    if test.startswith("tests/models/" ):
        test = test.split("/" )[2]
    else:
        test = None
    return test
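# Sketch: "tests/models/bert/test_modeling_bert.py::test_forward" -> "bert",
# while tests outside tests/models/ map to None and are filtered out by the caller.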
def reduce_by_model( logs , error_filter=None ):
    logs = [(x[0], x[1], get_model(x[2] )) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test] )
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values() )
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
    return r
def make_github_table( reduced_by_error ):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = F"| {count} | {error[:100]} | |"
        lines.append(line )
    return "\n".join(lines )
def make_github_table_per_model( reduced_by_model ):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items() )[0]
        line = F"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line )
    return "\n".join(lines )
if __name__ == "__main__":
__A : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
__A : Union[str, Any] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__A : int = get_job_links(args.workflow_run_id, token=args.token)
__A : Dict = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__A : Union[str, Any] = k.find(' / ')
__A : Optional[int] = k[index + len(' / ') :]
__A : Optional[int] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__A : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__A : Optional[int] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__A : Dict = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__A : Optional[Any] = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__A : str = reduce_by_error(errors)
__A : int = reduce_by_model(errors)
__A : Any = make_github_table(reduced_by_error)
__A : List[str] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa) | 16 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_convbert_fast'] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convbert'] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convbert'] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 107 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
__A : Optional[Any] = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig( PretrainedConfig ):
    model_type = "gpt_neox"
    def __init__( self , vocab_size=50432 , hidden_size=6144 , num_hidden_layers=44 , num_attention_heads=64 , intermediate_size=24576 , hidden_act="gelu" , rotary_pct=0.25 , rotary_emb_base=10000 , attention_dropout=0.0 , hidden_dropout=0.0 , classifier_dropout=0.1 , max_position_embeddings=2048 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=0 , eos_token_id=2 , tie_word_embeddings=False , use_parallel_residual=True , rope_scaling=None , **kwargs , ):
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!" )
    def _rope_scaling_validation( self ):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get("type" , None )
        rope_scaling_factor = self.rope_scaling.get("factor" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" ) | 16 | 0 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER( TokenClassificationTask ):
    def __init__( self , label_idx=-1 ):
        self.label_idx = label_idx
    def read_examples_from_file( self , data_dir , mode ):
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , F'''{mode}.txt''' )
        guid_index = 1
        examples = []
        with open(file_path , encoding='utf-8' ) as f:
            words = []
            labels = []
            for line in f:
                if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=words , labels=labels ) )
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(' ' )
                    words.append(splits[0] )
                    if len(splits ) > 1:
                        labels.append(splits[self.label_idx].replace('\n' , '' ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('O' )
            if words:
                examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=words , labels=labels ) )
        return examples
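    # Input sketch: the parser above expects CoNLL-style "token label" lines with
    # blank lines between sentences, e.g.
    #   EU B-ORG
    #   rejects O
    # which becomes InputExample(guid="train-1", words=["EU", "rejects"], labels=["B-ORG", "O"]).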
    def write_predictions_to_file( self , writer: TextIO , test_input_reader: TextIO , preds_list: List ):
        example_id = 0
        for line in test_input_reader:
            if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                writer.write(line )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
                writer.write(output_line )
            else:
                logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
    def get_labels( self , path: str ):
        if path:
            with open(path , 'r' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk( NER ):
    def __init__( self ):
        super().__init__(label_idx=-2 )
    def get_labels( self , path: str ):
        if path:
            with open(path , 'r' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS( TokenClassificationTask ):
    def read_examples_from_file( self , data_dir , mode ):
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , F'''{mode}.txt''' )
        guid_index = 1
        examples = []
        with open(file_path , encoding='utf-8' ) as f:
            for sentence in parse_incr(f ):
                words = []
                labels = []
                for token in sentence:
                    words.append(token['form'] )
                    labels.append(token['upos'] )
                assert len(words ) == len(labels )
                if words:
                    examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=words , labels=labels ) )
                    guid_index += 1
        return examples
    def write_predictions_to_file( self , writer: TextIO , test_input_reader: TextIO , preds_list: List ):
        example_id = 0
        for sentence in parse_incr(test_input_reader ):
            s_p = preds_list[example_id]
            out = ''
            for token in sentence:
                out += F'''{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '''
            out += "\n"
            writer.write(out )
            example_id += 1
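    # Output sketch: each token is written as `form (gold_upos|predicted_upos)`,
    # e.g. "The (DET|DET) cat (NOUN|NOUN) sleeps (VERB|VERB)" plus a trailing
    # newline per sentence.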
    def get_labels( self , path: str ):
        if path:
            with open(path , 'r' ) as f:
                return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 246 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_git'] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
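    # Note (sketch): with this lazy-import layout, `from transformers.models.git
    # import GitModel` defers loading the torch-backed module until first
    # attribute access; the TYPE_CHECKING branch below exists only so static
    # type checkers see eager imports.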
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 16 | 0 |
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 2_9_9_7_9_2_4_5_8

# Symbols
ct, x, y, z = symbols("""ct x y z""")


def beta(velocity: float) -> float:
    """Fraction of the speed of light for a given velocity."""
    if velocity > c:
        raise ValueError('Speed must not exceed light speed 299,792,458 [m/s]!')
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('Speed must be greater than or equal to 1!')
    return velocity / c


def gamma(velocity: float) -> float:
    """Lorentz factor 1 / sqrt(1 - beta**2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """4x4 Lorentz boost along the x-axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ])


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(2_9_9_7_9_2_4_5)
    print("""Example of four vector: """)
    print(f'ct\' = {four_vector[0]}')
    print(f'x\' = {four_vector[1]}')
    print(f'y\' = {four_vector[2]}')
    print(f'z\' = {four_vector[3]}')

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f'\n{numerical_vector}')
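    # Worked sketch: beta(0.5 * c) = 0.5 and gamma(0.5 * c) = 1 / sqrt(1 - 0.25)
    # ~= 1.1547, so a boost at half light speed mixes the ct and x components
    # while leaving y and z untouched.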
| 23 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key( state_dict , old , new ):
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
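# Sketch: "backbone.0.body.layer1.0.conv1.weight"
#   -> "backbone.conv_encoder.model.layer1.0.conv1.weight";
# every key without the "backbone.0.body" prefix passes through unchanged.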
def read_in_q_k_v( state_dict , is_panoptic=False ):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[F"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[F"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[F"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[F"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[F"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
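# Split sketch: the fused in_proj weight has shape (3 * 256, 256); rows [0:256]
# become the query projection, rows [256:512] the key projection, and the last
# 256 rows the value projection, matching the three slices above.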
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint( model_name , pytorch_dump_folder_path ):
SCREAMING_SNAKE_CASE = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
SCREAMING_SNAKE_CASE = "resnet101"
if "dc5" in model_name:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = "panoptic" in model_name
if is_panoptic:
SCREAMING_SNAKE_CASE = 250
else:
SCREAMING_SNAKE_CASE = 91
SCREAMING_SNAKE_CASE = "huggingface/label-files"
SCREAMING_SNAKE_CASE = "coco-detection-id2label.json"
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(A__ , A__ , repo_type="dataset" ) , "r" ) )
    SCREAMING_SNAKE_CASE = {int(k ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
# load image processor
SCREAMING_SNAKE_CASE = "coco_panoptic" if is_panoptic else "coco_detection"
SCREAMING_SNAKE_CASE = ConditionalDetrImageProcessor(format=A__ )
# prepare image
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=A__ , return_tensors="pt" )
SCREAMING_SNAKE_CASE = encoding["pixel_values"]
logger.info(F"Converting model {model_name}..." )
# load original model from torch hub
SCREAMING_SNAKE_CASE = torch.hub.load("DeppMeng/ConditionalDETR" , A__ , pretrained=A__ ).eval()
SCREAMING_SNAKE_CASE = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
SCREAMING_SNAKE_CASE = "conditional_detr." + src
rename_key(A__ , A__ , A__ )
SCREAMING_SNAKE_CASE = rename_backbone_keys(A__ )
# query, key and value matrices need special treatment
read_in_q_k_v(A__ , is_panoptic=A__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
SCREAMING_SNAKE_CASE = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
# finally, create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE = ConditionalDetrForSegmentation(A__ ) if is_panoptic else ConditionalDetrForObjectDetection(A__ )
model.load_state_dict(A__ )
model.eval()
model.push_to_hub(repo_id=A__ , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
SCREAMING_SNAKE_CASE = conditional_detr(A__ )
SCREAMING_SNAKE_CASE = model(A__ )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
image_processor.save_pretrained(A__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 16 | 0 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase( unittest.TestCase ):
    def _get_tensors( self , length ):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores
    def test_list_criteria( self ):
        input_ids, scores = self._get_tensors(5 )
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_length_criteria( self ):
        criteria = MaxLengthCriteria(max_length=10 )
        input_ids, scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_new_tokens_criteria( self ):
        criteria = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
        input_ids, scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
        criteria_list = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )
    def test_max_time_criteria( self ):
        input_ids, scores = self._get_tensors(5 )
        criteria = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(input_ids , scores ) )
        criteria = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_validate_stopping_criteria( self ):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
        with self.assertWarns(UserWarning ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(stopping_criteria ) , 1 )
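    # Usage sketch of the criteria exercised above:
    #
    #   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20),
    #                                    MaxTimeCriteria(max_time=1.0)])
    #   done = criteria(input_ids, scores)  # True once any criterion fires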
| 529 |
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    # Backtracking: fix each unused element at position `index`, recurse, undo.
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
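# Output sketch: generate_all_permutations([1, 2]) prints
#   [1, 2]
#   [2, 1]
# one list per permutation, produced by the backtracking loop above.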
sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
__A : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a) | 16 | 0 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
_snake_case = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
'''simple docstring'''
    def __init__( self , parent , d_model=16 , batch_size=13 , prediction_length=7 , context_length=14 , label_length=10 , cardinality=19 , embedding_dimension=5 , num_time_features=4 , is_training=True , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , lags_sequence=[1, 2, 3, 4, 5] , moving_average=25 , autocorrelation_factor=5 , ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config( self ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict( self , config ):
        _past_length = config.context_length + max(config.lags_sequence )
        static_categorical_features = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        past_values = floats_tensor([self.batch_size, _past_length] )
        past_observed_mask = floats_tensor([self.batch_size, _past_length] ) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        future_values = floats_tensor([self.batch_size, config.prediction_length] )
        inputs_dict = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
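    # Shape sketch (names as assigned above): the "past" tensors span
    # context_length + max(lags_sequence) steps, e.g.
    #   past_values: (batch_size, _past_length)
    #   past_time_features: (batch_size, _past_length, num_time_features)
    # while the "future" tensors span prediction_length steps.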
    def prepare_config_and_inputs( self ):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config )
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone( self , config , inputs_dict ):
lowerCamelCase : Tuple = AutoformerModel(config=__lowerCamelCase ).to(__lowerCamelCase ).eval()
lowerCamelCase : Optional[int] = model(**__lowerCamelCase )
lowerCamelCase : str = outputs.encoder_last_hidden_state
lowerCamelCase : Any = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase : Dict = model.get_encoder()
encoder.save_pretrained(__lowerCamelCase )
lowerCamelCase : Dict = AutoformerEncoder.from_pretrained(__lowerCamelCase ).to(__lowerCamelCase )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : str = model.create_network_inputs(**__lowerCamelCase )
lowerCamelCase , lowerCamelCase : List[Any] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
lowerCamelCase : List[str] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
lowerCamelCase : int = encoder(inputs_embeds=__lowerCamelCase )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
lowerCamelCase : List[Any] = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
lowerCamelCase : Any = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
lowerCamelCase : Tuple = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
lowerCamelCase : Union[str, Any] = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase : List[str] = model.get_decoder()
decoder.save_pretrained(__lowerCamelCase )
lowerCamelCase : List[str] = AutoformerDecoder.from_pretrained(__lowerCamelCase ).to(__lowerCamelCase )
lowerCamelCase : Optional[Any] = decoder(
trend=__lowerCamelCase , inputs_embeds=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
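    # Note (sketch): Autoformer first decomposes the context window into a
    # seasonal and a trend component (a moving average); the decoder consumes
    # the seasonal part as embeddings and carries the trend as a separate
    # stream, which is what the concatenations above reconstruct.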
@require_torch
class AutoformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
__A : Any = False
__A : Tuple = False
__A : Optional[int] = False
__A : Dict = False
__A : Union[str, Any] = False
__A : Union[str, Any] = False
    def setUp( self ):
        self.model_tester = AutoformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AutoformerConfig , has_text_modality=False )
def _snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowerCamelCase : Dict = model_class(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCamelCase )
lowerCamelCase , lowerCamelCase : int = model_class.from_pretrained(__lowerCamelCase , output_loading_info=__lowerCamelCase )
self.assertEqual(info["missing_keys"] , [] )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__lowerCamelCase )
@unittest.skip(reason="Model has no tokens embeddings" )
def _snake_case ( self ):
"""simple docstring"""
pass
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = inspect.signature(getattr(__lowerCamelCase , "forward" ) )
# The main input is the name of the argument after `self`
lowerCamelCase : Tuple = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , __lowerCamelCase )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : str = model_class(__lowerCamelCase )
lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : List[Any] = [*signature.parameters.keys()]
lowerCamelCase : Any = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(__lowerCamelCase )] , __lowerCamelCase )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Tuple = True
lowerCamelCase : Tuple = getattr(self.model_tester , "seq_length" , __lowerCamelCase )
lowerCamelCase : int = getattr(self.model_tester , "decoder_seq_length" , __lowerCamelCase )
lowerCamelCase : int = getattr(self.model_tester , "encoder_seq_length" , __lowerCamelCase )
lowerCamelCase : List[Any] = getattr(self.model_tester , "d_model" , __lowerCamelCase )
lowerCamelCase : List[str] = getattr(self.model_tester , "num_attention_heads" , __lowerCamelCase )
lowerCamelCase : List[str] = d_model // num_attention_heads
for model_class in self.all_model_classes:
lowerCamelCase : Optional[int] = True
lowerCamelCase : List[str] = False
lowerCamelCase : int = True
lowerCamelCase : Optional[int] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase : int = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
lowerCamelCase : str = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase : List[Any] = True
lowerCamelCase : Any = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase : Any = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
lowerCamelCase : List[Any] = outputs.encoder_attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
lowerCamelCase : Optional[int] = len(__lowerCamelCase )
lowerCamelCase : str = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
# decoder attentions
lowerCamelCase : int = outputs.decoder_attentions
self.assertIsInstance(__lowerCamelCase , (list, tuple) )
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
lowerCamelCase : str = outputs.cross_attentions
self.assertIsInstance(__lowerCamelCase , (list, tuple) )
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
lowerCamelCase : Any = True
lowerCamelCase : int = True
lowerCamelCase : str = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase : Optional[int] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(out_len + 2 , len(__lowerCamelCase ) )
lowerCamelCase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def _snake_case ( self ):
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    """Download a cached batch of the tourism-monthly dataset from the Hub."""
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
'''simple docstring'''
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : int = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__lowerCamelCase )
lowerCamelCase : List[str] = prepare_batch()
with torch.no_grad():
lowerCamelCase : Dict = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
lowerCamelCase : List[Any] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __lowerCamelCase )
lowerCamelCase : List[Any] = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=__lowerCamelCase )
self.assertTrue(torch.allclose(output[0, :3, :3] , __lowerCamelCase , atol=__lowerCamelCase ) )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__lowerCamelCase )
lowerCamelCase : Any = prepare_batch("val-batch.pt" )
with torch.no_grad():
lowerCamelCase : List[str] = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
lowerCamelCase : str = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __lowerCamelCase )
lowerCamelCase : Dict = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=__lowerCamelCase )
self.assertTrue(torch.allclose(output[0, :3, :3] , __lowerCamelCase , atol=__lowerCamelCase ) )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[int] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__lowerCamelCase )
lowerCamelCase : Any = prepare_batch("val-batch.pt" )
with torch.no_grad():
lowerCamelCase : str = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
lowerCamelCase : Optional[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __lowerCamelCase )
lowerCamelCase : Optional[Any] = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=__lowerCamelCase )
lowerCamelCase : Union[str, Any] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __lowerCamelCase , rtol=1e-1 ) )
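# Standalone sketch of the forecasting path the integration tests above
# exercise (requires network access to the Hub; the argument names are taken
# from the test calls themselves, so they match the real Autoformer API):
#
#   import torch
#   from huggingface_hub import hf_hub_download
#   from transformers import AutoformerForPrediction
#
#   model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
#   file = hf_hub_download(
#       repo_id="hf-internal-testing/tourism-monthly-batch", filename="val-batch.pt", repo_type="dataset"
#   )
#   batch = torch.load(file, map_location="cpu")
#   with torch.no_grad():
#       outputs = model.generate(
#           past_values=batch["past_values"],
#           past_time_features=batch["past_time_features"],
#           past_observed_mask=batch["past_observed_mask"],
#           static_categorical_features=batch["static_categorical_features"],
#           future_time_features=batch["future_time_features"],
#       )
#   point_forecast = outputs.sequences.mean(dim=1)  # average the sampled trajectories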
def solution(n: int = 1000) -> int:
    """Return the sum of all natural numbers below ``n`` that are multiples of 3 or 5."""
    result = 0
    for a in range(3, n):
        # A multiple of 15 is already a multiple of 3, so no separate branch is needed.
        if a % 3 == 0 or a % 5 == 0:
            result += a
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
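# Quick sanity-check sketch for the helper above (not part of the scheduler):
#
#   betas = betas_for_alpha_bar(1000)
#   assert betas.shape == (1000,)
#   assert float(betas.min()) >= 0.0 and float(betas.max()) <= 0.999  # clipped at max_beta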
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas=None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample, timestep):
        """Scale the denoising-model input by ``1 / sqrt(sigma**2 + 1)``."""
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(self, model_output, timestep, sample, return_dict=True):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps):
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
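# Denoising-loop sketch for this scheduler (it matches diffusers'
# KDPM2DiscreteScheduler as reconstructed above; `unet` is a hypothetical
# noise-prediction model, not part of this file):
#
#   scheduler = KDPM2DiscreteScheduler()
#   scheduler.set_timesteps(50, device="cpu")
#   sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t)  # hypothetical model call
#       sample = scheduler.step(noise_pred, t, sample).prev_sample
#
# Note the scheduler effectively takes two model evaluations per output step
# (DPM-Solver-2): second-order steps evaluate at the interpolated sigma and
# reuse the sample stored during the first-order step.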
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
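# Effect sketch: with the _LazyModule pattern above, importing the package
# stays cheap and the torch-backed classes are only materialized on first
# attribute access, e.g.:
#
#   from transformers.models.bigbird_pegasus import BigBirdPegasusConfig  # config only, no modeling import
#   from transformers.models.bigbird_pegasus import BigBirdPegasusModel   # now triggers the modeling import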
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
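# Usage sketch (the defaults above mirror MIT/ast-finetuned-audioset-10-10-0.4593):
#
#   config = ASTConfig(max_length=1024, num_mel_bins=128)
#   config.model_type               # "audio-spectrogram-transformer"
#   config.to_dict()["patch_size"]  # 16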
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
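# Round-trip sketch of what the tests above assert (requires `optimum`;
# `to_bettertransformer` swaps in fused attention modules, and only a
# reversed model can be serialized):
#
#   model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
#   model = model.to_bettertransformer()
#   ...                                    # fast inference here
#   model = model.reverse_bettertransformer()
#   model.save_pretrained("saved-model")   # serialization works again after reversing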
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # Fitness: number of characters matching the target at the same position.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
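# e.g. with a slice point of 2: crossover("abcdef", "UVWXYZ") -> ("abWXYZ", "UVcdef")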
def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of the child."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
# Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''')
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
# This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
print(
f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
)
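# A shorter target converges in far fewer generations, e.g. (sketch; every
# character of the target must appear in the gene list):
#
#   basic("hello world!", genes_list, debug=False)
#   # -> (generation, total_population, "hello world!")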
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
'''simple docstring'''
def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : int=13 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : str=True , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[int]=224 , __lowerCamelCase : Any=1000 , __lowerCamelCase : Optional[Any]=[3, 3, 6, 4] , __lowerCamelCase : List[Any]=[48, 56, 112, 220] , ):
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = layer_depths
SCREAMING_SNAKE_CASE = embed_dims
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : Dict ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__lowerCamelCase , layer_scale_init_value=1e-5 , )
def _snake_case ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = SwiftFormerModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _snake_case ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : int ):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(
self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _snake_case ( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def _snake_case ( self : Optional[int] ):
pass
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _snake_case ( self : Tuple ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = SwiftFormerModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def _snake_case ( self : Union[str, Any] ):
pass
def _snake_case ( self : Optional[Any] ):
def check_hidden_states_output(__lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Tuple ):
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = 8
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(__lowerCamelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : List[Any] ):
def _config_zero_init(__lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = copy.deepcopy(__lowerCamelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(__lowerCamelCase , __lowerCamelCase , 1e-10 )
if isinstance(getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = _config_zero_init(getattr(__lowerCamelCase , __lowerCamelCase ) )
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return configs_no_init
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = _config_zero_init(__lowerCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(config=__lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self : str ):
pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def _snake_case ( self : List[str] ):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(__lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
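# End-to-end sketch of the integration test above (requires the vision extras
# and network access; names mirror the test code):
#
#   processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
#   model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits  # shape (1, 1000)
#   print(model.config.id2label[int(logits.argmax(-1))])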
def solution() -> int:
    """Count the Sundays that fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000), Project Euler problem 19."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
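# Cross-check sketch with the standard library (same count, 171):
#
#   import calendar
#   sum(
#       calendar.weekday(year, month, 1) == calendar.SUNDAY
#       for year in range(1901, 2001)
#       for month in range(1, 13)
#   )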
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
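# Usage sketch:
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="out",
#       predict_with_generate=True,
#       generation_max_length=128,
#       generation_num_beams=4,
#   )
#   args.to_dict()  # any nested GenerationConfig is serialized recursively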
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
def __init__( self , snake_case , snake_case=1_3 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=9_9 , snake_case=3_2 , snake_case=5 , snake_case=4 , snake_case=3_7 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=1_6 , snake_case=2 , snake_case=0.02 , snake_case=False , snake_case=True , snake_case="None" , snake_case=3 , snake_case=4 , snake_case=None , ) -> Optional[Any]:
"""simple docstring"""
lowercase : Tuple = parent
lowercase : int = batch_size
lowercase : List[str] = seq_length
lowercase : str = is_training
lowercase : Union[str, Any] = use_input_mask
lowercase : Optional[int] = use_token_type_ids
lowercase : str = use_labels
lowercase : int = vocab_size
lowercase : Any = hidden_size
lowercase : List[str] = num_hidden_layers
lowercase : Optional[int] = num_attention_heads
lowercase : Dict = intermediate_size
lowercase : Dict = hidden_act
lowercase : Dict = hidden_dropout_prob
lowercase : List[Any] = attention_probs_dropout_prob
lowercase : List[str] = max_position_embeddings
lowercase : Union[str, Any] = type_vocab_size
lowercase : Optional[Any] = type_sequence_label_size
lowercase : List[Any] = initializer_range
lowercase : Optional[Any] = num_labels
lowercase : Optional[Any] = num_choices
lowercase : Optional[int] = relative_attention
lowercase : str = position_biased_input
lowercase : Dict = pos_att_type
lowercase : List[str] = scope
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : int = None
if self.use_input_mask:
lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowercase : Optional[int] = None
if self.use_token_type_ids:
lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : Dict = None
lowercase : List[str] = None
lowercase : str = None
if self.use_labels:
lowercase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
lowercase : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _UpperCAmelCase ( self , snake_case ) -> List[str]:
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _UpperCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[int] = DebertaVaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowercase : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase )[0]
lowercase : str = model(__lowerCamelCase , token_type_ids=__lowerCamelCase )[0]
lowercase : Tuple = model(__lowerCamelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _UpperCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Dict:
"""simple docstring"""
lowercase : str = DebertaVaForMaskedLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowercase : Dict = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> int:
"""simple docstring"""
lowercase : str = self.num_labels
lowercase : Union[str, Any] = DebertaVaForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowercase : Optional[int] = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__lowerCamelCase )
def _UpperCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Optional[int]:
"""simple docstring"""
lowercase : List[Any] = self.num_labels
lowercase : int = DebertaVaForTokenClassification(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowercase : str = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> List[Any]:
"""simple docstring"""
lowercase : Tuple = DebertaVaForQuestionAnswering(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowercase : Any = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Union[str, Any]:
"""simple docstring"""
lowercase : int = DebertaVaForMultipleChoice(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowercase : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : List[str] = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
lowercase : Dict = DebertaVaModelTester(self )
lowercase : str = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__lowerCamelCase )
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__lowerCamelCase )
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__lowerCamelCase )
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__lowerCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__lowerCamelCase )
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__lowerCamelCase )
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Optional[int] = DebertaVaModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
@unittest.skip(reason="""Model not available yet""" )
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
pass
@slow
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
lowercase : Optional[Any] = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
lowercase : Optional[int] = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
lowercase : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase : Optional[Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
# compare the actual values for a slice.
lowercase : int = torch.tensor(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1E-4 ) , f'''{output[:, 1:4, 1:4]}''' )
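# The same checkpoint through the high-level API (sketch; assumes network
# access and that [MASK] is DeBERTa's mask token):
#
#   from transformers import pipeline
#   fill = pipeline("fill-mask", model="microsoft/deberta-v2-xlarge")
#   fill("Paris is the [MASK] of France.")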
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers listed in num.txt."""
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C
def electric_conductivity(conductivity: float, electron_conc: float, mobility: float) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
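# Worked example (sketch, copper-like numbers): solving sigma = n * e * mu
# for the electron concentration gives
# n = sigma / (e * mu) = 5.96e7 / (1.6021e-19 * 4.5e-3) ~ 8.27e28 m^-3:
#
#   electric_conductivity(conductivity=5.96e7, electron_conc=0, mobility=4.5e-3)
#   # -> ('electron_conc', 8.27...e+28)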
import pytest
DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'
DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
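# Sketch of a test consuming these fixtures (hypothetical test module):
#
#   def test_dummy_dataset_loads(dataset_loading_script_dir):
#       from datasets import load_dataset
#       ds = load_dataset(dataset_loading_script_dir, split="train")
#       assert "tokens" in ds.column_names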
"""Tokenization classes for LED."""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/led-base-16384': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def _SCREAMING_SNAKE_CASE ( ):
_A = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
_A = bs[:]
_A = 0
for b in range(2**8 ):
if b not in bs:
bs.append(A__ )
cs.append(2**8 + n )
n += 1
_A = [chr(A__ ) for n in cs]
return dict(zip(A__ , A__ ) )
def _SCREAMING_SNAKE_CASE ( __snake_case : str ):
_A = set()
_A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A = char
return pairs
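# e.g. get_pairs(("l", "o", "w", "er")) -> {("l", "o"), ("o", "w"), ("w", "er")}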
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask']) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['global_attention_mask'])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side))
        return encoded_inputs
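# Minimal usage sketch (an assumption for illustration -- it presumes Hub
# access or a local copy of the vocab/merges files listed above):
# tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
# batch = tokenizer("Long document to summarize.", return_tensors="pt")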
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/dpr-ctx_encoder-single-nq-base': 512,
    'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/dpr-question_encoder-single-nq-base': 512,
    'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/dpr-reader-single-nq-base': 512,
    'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
    'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
    'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
    'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    r"""Constructs a DPR context-encoder tokenizer (a BertTokenizer with DPR checkpoints)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    r"""Constructs a DPR question-encoder tokenizer (a BertTokenizer with DPR checkpoints)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])

CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    ```
    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    ```

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
              of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the first
              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.")
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    r"""Constructs a DPR reader tokenizer (a BertTokenizer plus span-decoding helpers)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'},
    )
    model_revision: str = field(
        default='main',
        metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
    validation_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'},
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={'help': 'The number of processes to use for the preprocessing.'},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. If passed, sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            'help': (
                'Whether to pad all samples to the maximum sentence length. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
                'efficient on GPU but very bad for TPU.'
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."


@dataclass
class DataCollatorForMultipleChoice:
    """Data collator that dynamically pads multiple-choice inputs (flatten -> pad -> un-flatten)."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]['input_ids'])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors='pt',
        )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch['labels'] = torch.tensor(labels, dtype=torch.int64)
        return batch
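# Shape sketch: for a batch of B examples with C=4 choices, the collator above
# flattens to B*C feature dicts, pads them once, views each tensor back to
# (B, C, seq_len), then re-attaches the (B,) label tensor.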
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_swag', model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        + f'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}'
    )
    logger.info(f'Training/evaluation parameters {training_args}')

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                'Use --overwrite_output_dir to overcome.'
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.'
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files['train'] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['validation'] = data_args.validation_file
        extension = data_args.train_file.split('.')[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            'swag',
            'regular',
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool('.ckpt' in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f'ending{i}' for i in range(4)]
    context_name = 'sent1'
    question_header_name = 'sent2'

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
                ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
                ' override this default with `--block_size xxx`.'
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
                f' model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding='max_length' if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('--do_train requires a train dataset')
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc='train dataset map pre-processing'):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset')
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc='validation dataset map pre-processing'):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)

    kwargs = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'multiple-choice',
        'dataset_tags': 'swag',
        'dataset_args': 'regular',
        'dataset': 'SWAG',
        'language': 'en',
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
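# Example invocation (a sketch; the flags mirror the argument dataclasses
# above, but adjust the model name and paths to your setup):
#   python run_swag.py --model_name_or_path bert-base-uncased \
#       --do_train --do_eval --output_dir /tmp/swag_out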
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of the Hermitian matrix `a` and vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
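# Math note: for a Hermitian matrix M and non-zero vector v, the Rayleigh
# quotient R(M, v) = (v* M v) / (v* v) is always real and is bounded by the
# smallest and largest eigenvalues of M.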
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class Swin2SRConfig(PretrainedConfig):
    """Configuration class for the Swin2SR image super-resolution model."""

    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
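# Usage sketch (illustrative; assumes the surrounding `transformers` package):
# config = Swin2SRConfig(upscale=4, upsampler="pixelshuffle")
# config.num_layers  # -> 6, derived from len(depths) in __init__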
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
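# Design note: `f_cost` is the Manhattan heuristic alone (calculate_heuristic
# ignores g_cost), so nodes are expanded purely by estimated distance to the
# goal -- greedy best-first search rather than A*.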
if __name__ == "__main__":
__A : Optional[Any] = (0, 0)
__A : Optional[int] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('------')
__A : List[str] = GreedyBestFirst(init, goal)
__A : Tuple = greedy_bf.search()
if path:
for pos_x, pos_y in path:
__A : Optional[Any] = 2
for elem in grid:
print(elem) | 16 | 0 |
def get_data(source_data: list) -> list:
    """Splits row-major source data into per-column lists of floats."""
    data_lists: list = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list, weights: list) -> list:
    """Min-max normalises each column, inverting it when its weight is 0."""
    score_lists: list = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list) -> list:
    """Sums the per-column scores into one combined score per row."""
    final_scores: list = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list, weights: list) -> list:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
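# Usage sketch (hypothetical data): score two cars on (price, mileage), where
# weight 0 means "lower is better" and weight 1 means "higher is better".
# procentual_proximity([[20, 60], [23, 90]], [0, 1]) appends a combined score
# to each inner row in place and returns the mutated list.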
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
)
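# Usage sketch (assumes a checkpoint with Flax weights is reachable):
# model = FlaxAutoModel.from_pretrained("bert-base-cased")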
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Converts a 3D point to its 2D perspective projection."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
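# Worked example: with scale=10.0 and distance=10.0, the point (1, 2, 3)
# projects to x' = (1*10)/(3+10)*10 ≈ 7.69 and y' = (2*10)/(3+10)*10 ≈ 15.38.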
def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotates a 3D point around the given axis by the given angle."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
print(f'''{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }''')
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
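# Example: kinetic_energy(10, 10) == 0.5 * 10 * 10 * 10 == 500.0; thanks to
# abs(), kinetic_energy(10, -10) returns the same 500.0.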
if __name__ == "__main__":
import doctest
    doctest.testmod(verbose=True)
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(','):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.')
        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
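# A minimal usage sketch for the pipeline above (illustrative; assumes the
# public "facebook/DiT-XL-2-256" checkpoint, which requires a download):
#
#     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#     class_ids = pipe.get_label_ids(["golden retriever"])
#     image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]
#
# get_label_ids maps human-readable ImageNet names to the integer class ids
# the transformer was conditioned on; id 1000 in __call__ above acts as the
# null class for classifier-free guidance.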
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")

        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
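

# Usage sketch (illustrative): the legacy __init__ above flips deprecated
# `no_*` flags into their positive counterparts, so these two calls are
# equivalent:
#
#     TensorFlowBenchmarkArguments(models=["bert-base-uncased"], no_memory=True)
#     TensorFlowBenchmarkArguments(models=["bert-base-uncased"], memory=False)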
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
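

# Quick illustrative note (not part of the original test file): ids_tensor
# draws integer token ids strictly below vocab_size, and
# random_attention_mask guarantees at least one attended token per row by
# forcing the last column to 1, e.g.:
#
#     ids = ids_tensor((2, 5), vocab_size=99)   # shape (2, 5), values in [0, 98]
#     mask = random_attention_mask((2, 5))      # zeros/ones with mask[:, -1] == 1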
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : List[str] = self._get_input_ids_and_config()
_lowerCAmelCase : str = False
_lowerCAmelCase : Optional[int] = max_length
_lowerCAmelCase : Any = 0
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : str = model_class(__lowerCamelCase )
_lowerCAmelCase : int = model_class.__name__[4:] # Skip the "Flax" at the beginning
_lowerCAmelCase : List[str] = getattr(__lowerCamelCase ,__lowerCamelCase )
_lowerCAmelCase : Tuple = pt_model_class(__lowerCamelCase ).eval()
_lowerCAmelCase : Tuple = load_flax_weights_in_pytorch_model(__lowerCamelCase ,flax_model.params )
_lowerCAmelCase : List[Any] = flax_model.generate(__lowerCamelCase ).sequences
_lowerCAmelCase : int = pt_model.generate(torch.tensor(__lowerCamelCase ,dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
_lowerCAmelCase : Optional[int] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Optional[int] = self._get_input_ids_and_config()
_lowerCAmelCase : Dict = False
_lowerCAmelCase : str = max_length
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(__lowerCamelCase )
_lowerCAmelCase : Optional[Any] = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCamelCase )
_lowerCAmelCase : int = jit(model.generate )
_lowerCAmelCase : List[str] = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : int = self._get_input_ids_and_config()
_lowerCAmelCase : str = True
_lowerCAmelCase : List[str] = max_length
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : List[Any] = model_class(__lowerCamelCase )
_lowerCAmelCase : Tuple = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCamelCase )
_lowerCAmelCase : Optional[Any] = jit(model.generate )
_lowerCAmelCase : Optional[Any] = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Optional[Any] = self._get_input_ids_and_config()
_lowerCAmelCase : Optional[int] = False
_lowerCAmelCase : List[str] = max_length
_lowerCAmelCase : Dict = 2
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(__lowerCamelCase )
_lowerCAmelCase : str = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCamelCase )
_lowerCAmelCase : Optional[int] = jit(model.generate )
_lowerCAmelCase : List[Any] = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = self._get_input_ids_and_config()
_lowerCAmelCase : List[Any] = False
_lowerCAmelCase : Optional[Any] = max_length
_lowerCAmelCase : Optional[Any] = 2
_lowerCAmelCase : int = 2
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : Any = model_class(__lowerCamelCase )
_lowerCAmelCase : List[Any] = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Optional[Any] = self._get_input_ids_and_config()
_lowerCAmelCase : str = True
_lowerCAmelCase : Optional[int] = max_length
_lowerCAmelCase : List[str] = 0.8
_lowerCAmelCase : Tuple = 10
_lowerCAmelCase : Any = 0.3
_lowerCAmelCase : int = 1
_lowerCAmelCase : Union[str, Any] = 8
_lowerCAmelCase : Optional[Any] = 9
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : List[Any] = model_class(__lowerCamelCase )
_lowerCAmelCase : Tuple = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCamelCase )
_lowerCAmelCase : str = jit(model.generate )
_lowerCAmelCase : Tuple = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = self._get_input_ids_and_config()
_lowerCAmelCase : List[str] = max_length
_lowerCAmelCase : Dict = 1
_lowerCAmelCase : int = 8
_lowerCAmelCase : Optional[Any] = 9
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : Tuple = model_class(__lowerCamelCase )
_lowerCAmelCase : Dict = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCamelCase )
_lowerCAmelCase : Optional[int] = jit(model.generate )
_lowerCAmelCase : List[str] = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : List[Any] = self._get_input_ids_and_config()
_lowerCAmelCase : Optional[Any] = max_length
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : Union[str, Any] = 1
_lowerCAmelCase : List[Any] = 8
_lowerCAmelCase : Tuple = 9
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(__lowerCamelCase )
_lowerCAmelCase : Dict = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCamelCase )
_lowerCAmelCase : List[str] = jit(model.generate )
_lowerCAmelCase : Optional[Any] = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : List[Any] = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCAmelCase : Optional[int] = attention_mask.at[(0, 0)].set(0 )
_lowerCAmelCase : int = False
_lowerCAmelCase : List[Any] = max_length
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : Union[str, Any] = model_class(__lowerCamelCase )
_lowerCAmelCase : Tuple = model.generate(__lowerCamelCase ,attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCamelCase )
_lowerCAmelCase : List[str] = jit(model.generate )
_lowerCAmelCase : Optional[int] = jit_generate(__lowerCamelCase ,attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCAmelCase : int = attention_mask.at[(0, 0)].set(0 )
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Optional[int] = max_length
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : Any = model_class(__lowerCamelCase )
_lowerCAmelCase : str = model.generate(__lowerCamelCase ,attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCamelCase )
_lowerCAmelCase : int = jit(model.generate )
_lowerCAmelCase : List[Any] = jit_generate(__lowerCamelCase ,attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Optional[Any] = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCAmelCase : List[Any] = attention_mask.at[(0, 0)].set(0 )
_lowerCAmelCase : Union[str, Any] = 2
_lowerCAmelCase : List[Any] = max_length
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : List[Any] = model_class(__lowerCamelCase )
_lowerCAmelCase : Optional[int] = model.generate(__lowerCamelCase ,attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCamelCase )
_lowerCAmelCase : List[str] = jit(model.generate )
_lowerCAmelCase : List[Any] = jit_generate(__lowerCamelCase ,attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """
    Heun's (modified Euler) method: a predictor step with the explicit Euler
    formula, then a trapezoidal corrector.
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_get = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_get))
        )
        x += step_size

    return y
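

# Worked example (illustrative, not part of the original module): solve
# y' = y with y(0) = 1 on [0, 1]. Heun's method is second order, so with step
# 0.1 the endpoint lands near e ~ 2.71828 (about 2.714).
def _euler_modified_demo() -> float:
    def f(x: float, y: float) -> float:
        return y

    ys = euler_modified(f, 1.0, 0.0, 0.1, 1.0)
    return float(ys[-1])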
if __name__ == "__main__":
import doctest
    doctest.testmod()
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
_lowercase = 'https://openaipublic.azureedge.net/jukebox/models/'
_lowercase = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
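

# Illustrative examples of the renaming performed by replace_key above (not
# part of the original script):
#
#     replace_key("vqvae.encoders.0.level_blocks.0.k")  # -> "...level_blocks.0.codebook"
#     replace_key("prior.x_out.weight")                 # -> "prior.fc_proj_out.weight"
#     replace_key("foo.y_emb.bar")                      # -> "foo.metadata_embedding.bar"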
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
import re
lowerCAmelCase_ : int = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
lowerCAmelCase_ : List[Any] = re.compile(
R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)")
lowerCAmelCase_ : Optional[Any] = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")
lowerCAmelCase_ : List[str] = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
lowerCAmelCase_ : Optional[Any] = re.compile(
R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)")
lowerCAmelCase_ : int = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")
lowerCAmelCase_ : Any = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
lowerCAmelCase_ : Any = re.compile(
R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)")
lowerCAmelCase_ : Any = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(A__):
lowerCAmelCase_ : Any = re_encoder_block_conv_in.match(A__)
lowerCAmelCase_ : Optional[int] = regex_match.groups()
lowerCAmelCase_ : Any = int(groups[2]) * 2 + int(groups[3])
lowerCAmelCase_ : List[Any] = F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
lowerCAmelCase_ : Any = re_encoder_block_conv_in.sub(A__ , A__)
elif re_encoder_block_resnet.fullmatch(A__):
lowerCAmelCase_ : Dict = re_encoder_block_resnet.match(A__)
lowerCAmelCase_ : List[str] = regex_match.groups()
lowerCAmelCase_ : List[Any] = int(groups[2]) * 2 + int(groups[3])
lowerCAmelCase_ : int = {"1": 1, "3": 2}[groups[-2]]
lowerCAmelCase_ : Tuple = F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
lowerCAmelCase_ : List[str] = F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCAmelCase_ : Optional[int] = prefix + resnet_block
lowerCAmelCase_ : Any = re_encoder_block_resnet.sub(A__ , A__)
elif re_encoder_block_proj_out.fullmatch(A__):
lowerCAmelCase_ : List[Any] = re_encoder_block_proj_out.match(A__)
lowerCAmelCase_ : Any = regex_match.groups()
lowerCAmelCase_ : Optional[Any] = F'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
lowerCAmelCase_ : Any = re_encoder_block_proj_out.sub(A__ , A__)
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(A__):
lowerCAmelCase_ : List[Any] = re_decoder_block_conv_out.match(A__)
lowerCAmelCase_ : Optional[int] = regex_match.groups()
lowerCAmelCase_ : List[str] = int(groups[2]) * 2 + int(groups[3]) - 2
lowerCAmelCase_ : Tuple = F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
lowerCAmelCase_ : Dict = re_decoder_block_conv_out.sub(A__ , A__)
elif re_decoder_block_resnet.fullmatch(A__):
lowerCAmelCase_ : Union[str, Any] = re_decoder_block_resnet.match(A__)
lowerCAmelCase_ : Optional[int] = regex_match.groups()
lowerCAmelCase_ : List[Any] = int(groups[2]) * 2 + int(groups[3]) - 2
lowerCAmelCase_ : Optional[Any] = {"1": 1, "3": 2}[groups[-2]]
lowerCAmelCase_ : Dict = F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
lowerCAmelCase_ : Dict = F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCAmelCase_ : List[Any] = prefix + resnet_block
lowerCAmelCase_ : int = re_decoder_block_resnet.sub(A__ , A__)
elif re_decoder_block_proj_in.fullmatch(A__):
lowerCAmelCase_ : str = re_decoder_block_proj_in.match(A__)
lowerCAmelCase_ : Union[str, Any] = regex_match.groups()
lowerCAmelCase_ : List[str] = F'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
lowerCAmelCase_ : Optional[int] = re_decoder_block_proj_in.sub(A__ , A__)
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(A__):
lowerCAmelCase_ : Optional[int] = re_prior_cond_conv_out.match(A__)
lowerCAmelCase_ : str = regex_match.groups()
lowerCAmelCase_ : Tuple = int(groups[1]) * 2 + int(groups[2]) - 2
lowerCAmelCase_ : int = F'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
lowerCAmelCase_ : List[Any] = re_prior_cond_conv_out.sub(A__ , A__)
elif re_prior_cond_resnet.fullmatch(A__):
lowerCAmelCase_ : str = re_prior_cond_resnet.match(A__)
lowerCAmelCase_ : List[str] = regex_match.groups()
lowerCAmelCase_ : Optional[Any] = int(groups[1]) * 2 + int(groups[2]) - 2
lowerCAmelCase_ : List[Any] = {"1": 1, "3": 2}[groups[-2]]
lowerCAmelCase_ : List[str] = F'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
lowerCAmelCase_ : Union[str, Any] = F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCAmelCase_ : Optional[int] = prefix + resnet_block
lowerCAmelCase_ : List[str] = re_prior_cond_resnet.sub(A__ , A__)
elif re_prior_cond_proj_in.fullmatch(A__):
lowerCAmelCase_ : Tuple = re_prior_cond_proj_in.match(A__)
lowerCAmelCase_ : Any = regex_match.groups()
lowerCAmelCase_ : List[str] = F'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
lowerCAmelCase_ : List[str] = re_prior_cond_proj_in.sub(A__ , A__)
# keep original key
else:
            key = original_key

        key = replace_key(key)
if F'''{key_prefix}.{key}''' not in model_state_dict or key is None:
print(F'''failed converting {original_key} to {key}, does not match''')
# handle missmatched shape
elif value.shape != model_state_dict[F'''{key_prefix}.{key}'''].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value
return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F'''{pytorch_dump_folder_path}/{file.split("/")[-1]}'''):
lowerCAmelCase_ : List[Any] = requests.get(F'''{PREFIX}{file}''' , allow_redirects=A__)
os.makedirs(F'''{pytorch_dump_folder_path}/''' , exist_ok=A__)
open(F'''{pytorch_dump_folder_path}/{file.split("/")[-1]}''' , "wb").write(r.content)
lowerCAmelCase_ : Tuple = MODEL_MAPPING[model_name.split("/")[-1]]
lowerCAmelCase_ : str = JukeboxConfig.from_pretrained(A__)
lowerCAmelCase_ : Dict = JukeboxModel(A__)
lowerCAmelCase_ : Union[str, Any] = []
lowerCAmelCase_ : Union[str, Any] = {}
for i, dict_name in enumerate(A__):
lowerCAmelCase_ : Union[str, Any] = torch.load(F'''{pytorch_dump_folder_path}/{dict_name.split("/")[-1]}''')["model"]
lowerCAmelCase_ : Optional[Any] = {}
for k in old_dic.keys():
if k.endswith(".b"):
lowerCAmelCase_ : str = old_dic[k]
elif k.endswith(".w"):
lowerCAmelCase_ : str = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
lowerCAmelCase_ : Tuple = old_dic[k]
else:
lowerCAmelCase_ : str = old_dic[k]
lowerCAmelCase_ : Dict = "vqvae" if i == 0 else F'''priors.{3 - i}'''
lowerCAmelCase_ : Tuple = fix_jukebox_keys(A__ , model.state_dict() , A__ , A__)
weight_dict.append(A__)
lowerCAmelCase_ : List[Any] = weight_dict.pop(0)
model.vqvae.load_state_dict(A__)
for i in range(len(A__)):
model.priors[i].load_state_dict(weight_dict[2 - i])
Path(A__).mkdir(exist_ok=A__)
with open(F'''{pytorch_dump_folder_path}/mapping.json''' , "w") as txtfile:
json.dump(A__ , A__)
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''')
model.save_pretrained(A__)
return weight_dict
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
_lowercase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
def sum_of_divisors(input_num: int) -> int:
    """
    Sum the proper divisors of a positive integer.
    """
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
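

# Illustrative checks (not part of the original module): 6 is a perfect
# number, so the sum of its proper divisors equals the number itself.
assert sum_of_divisors(6) == 1 + 2 + 3 == 6
assert sum_of_divisors(12) == 1 + 2 + 3 + 4 + 6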
if __name__ == "__main__":
import doctest
    doctest.testmod()
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
def next_term(a_i, k, i, n):
    # a_i is stored least-significant digit first: a_i = b * 10^k + c
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]

                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)

        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)
def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)

            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    # adds addend to the digit array in digits, starting at index k
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    """
    Returns a(n) of the digit-sum sequence starting from 1, where each term
    adds the digit sum of the previous term.
    """
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
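

# Direct O(n) reference for small n (illustrative, not part of the original
# solution): each term adds its own digit sum, so the sequence starts
# 1, 2, 4, 8, 16, 23, 28, 38, 49, ...  solution() reaches a(n) without
# iterating all n - 1 steps by memoising jumps keyed on digitsum(b) and c.
def _naive_sequence(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a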
if __name__ == "__main__":
print(f'''{solution() = }''')
from __future__ import annotations

import json

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """
    May raise json.decoder.JSONDecodeError
    """
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    """
    Class to crawl public Instagram user information
    """

    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """
        Return a dict of user information
        """
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "[email protected]"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
pass
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
lowercase , lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Optional[Any] = model_class(__lowerCamelCase )
lowercase : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : Dict = [*signature.parameters.keys()]
lowercase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
def check_hidden_states_output(snake_case , snake_case , snake_case ):
lowercase : List[str] = model_class(__lowerCamelCase )
lowercase : Optional[int] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) , training=__lowerCamelCase )
lowercase : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
lowercase , lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Optional[int] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase : Any = layer_type
lowercase : List[str] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase : List[str] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
lowercase , lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(snake_case , snake_case , snake_case , snake_case={} ):
lowercase : Tuple = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase )
lowercase : Any = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ).to_tuple()
def recursive_check(snake_case , snake_case ):
if isinstance(__lowerCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase ):
recursive_check(__lowerCamelCase , __lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(__lowerCamelCase , __lowerCamelCase ) ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
f''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(__lowerCamelCase , __lowerCamelCase )
for model_class in self.all_model_classes:
lowercase : int = model_class(__lowerCamelCase )
lowercase : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
lowercase : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowercase : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
lowercase : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowercase : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
lowercase : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"""output_hidden_states""": True} )
lowercase : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
lowercase : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"""output_hidden_states""": True} )
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : List[Any] = TFRegNetModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
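

# Illustrative note: the tests above can be run standalone with pytest, e.g.
# (path assumed from the transformers repository layout):
#
#     pytest tests/models/regnet/test_modeling_tf_regnet.py -q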
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, specifically
    avoiding whitespace/control characters the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
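

# Illustrative property of the mapping above (not part of the original file):
# all 256 byte values receive a unique printable unicode character, so BPE can
# round-trip arbitrary bytes losslessly.
#
#     table = bytes_to_unicode()
#     assert len(table) == 256 and len(set(table.values())) == 256
#     assert table[ord("A")] == "A"   # printable bytes map to themselves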
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word, where the word is a
    tuple of variable-length string symbols.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def _snake_case ( self : Dict , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def _snake_case ( self : Optional[Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _snake_case ( self : Optional[int] , text : str , is_split_into_words : bool = False , **kwargs : Union[str, Any] ):
        add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
return (text, kwargs)
    def _snake_case ( self : Any , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        return token_ids_0 + [self.eos_token_id]
    def _snake_case ( self : int , conversation : "Conversation" ):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text )
            else:
                # Generated responses should contain them already.
                inputs.append(text )
        full_string = " ".join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
        return input_ids
| 16 | 0
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
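# Pin seeds and force deterministic kernels so the pixel-slice assertions below are reproducible across runs.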
enable_full_determinism()
class __lowerCAmelCase ( PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase__ = DiTPipeline
lowerCAmelCase__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
lowerCAmelCase__ = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowerCAmelCase__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
        __lowerCamelCase = Transformer2DModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=__lowerCamelCase , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1000 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=__lowerCamelCase , )
__lowerCamelCase = AutoencoderKL()
__lowerCamelCase = DDIMScheduler()
__lowerCamelCase = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
return components
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
'''simple docstring'''
if str(__lowerCamelCase ).startswith('''mps''' ):
__lowerCamelCase = torch.manual_seed(__lowerCamelCase )
else:
__lowerCamelCase = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
__lowerCamelCase = {
'''class_labels''': [1],
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''cpu'''
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__lowerCamelCase = self.get_dummy_inputs(__lowerCamelCase )
__lowerCamelCase = pipe(**__lowerCamelCase ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__lowerCamelCase = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
__lowerCamelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCamelCase , 1E-3 )
def lowerCamelCase ( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=__lowerCamelCase , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowerCamelCase ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
pipe.to('''cuda''' )
__lowerCamelCase = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
__lowerCamelCase = pipe.get_label_ids(__lowerCamelCase )
__lowerCamelCase = pipe(__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=40 , output_type='''np''' ).images
for word, image in zip(__lowerCamelCase , __lowerCamelCase ):
__lowerCamelCase = load_numpy(
F"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
__lowerCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('''cuda''' )
__lowerCamelCase = ['''vase''', '''umbrella''']
__lowerCamelCase = pipe.get_label_ids(__lowerCamelCase )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=25 , output_type='''np''' ).images
for word, image in zip(__lowerCamelCase , __lowerCamelCase ):
__lowerCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
F"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1
| 175 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
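# Collect job links and test-report artifacts for a GitHub Actions workflow run, then aggregate the failures by error message and by model.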
def get_job_links( workflow_run_id , token=None ):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
    url = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url , headers=headers ).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F"&page={i + 2}" , headers=headers ).json()
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return job_links
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def get_artifacts_links( workflow_run_id , token=None ):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
    url = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url , headers=headers ).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F"&page={i + 2}" , headers=headers ).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
return artifacts
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
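# Artifact downloads are two-step: GitHub first answers with a redirect whose Location header is the real download URL.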
def download_artifact( artifact_name , artifact_url , output_dir , token ):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
    result = requests.get(artifact_url , headers=headers , allow_redirects=False )
    download_url = result.headers["Location"]
    response = requests.get(download_url , allow_redirects=True )
    file_path = os.path.join(output_dir , F"{artifact_name}.zip" )
    with open(file_path , "wb" ) as fp:
        fp.write(response.content )
def get_errors_from_single_artifact( artifact_zip_path , job_links=None ):
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path ) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename ):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename ) as f:
                        for line in f:
                            line = line.decode("UTF-8" ).strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": " )]
                                    error = line[line.index(": " ) + len(": " ) :]
                                    errors.append([error_line, error] )
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED " ):
                                # `test` is the test method that failed
                                test = line[len("FAILED " ) :]
                                failed_tests.append(test )
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors ) != len(failed_tests ):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors )} for `errors` "
            f"and {len(failed_tests )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem." )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name , None )
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors , failed_tests )]
    return result
def get_all_errors( artifact_dir , job_links=None ):
    errors = []
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if p.endswith(".zip" )]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p , job_links=job_links ) )
return errors
def reduce_by_error( logs , error_filter=None ):
    counter = Counter()
    counter.update([x[1] for x in logs] )
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def get_model( test ):
    # Map a failed test id like `tests/models/<model>/test_...::test_case` to its model folder name.
    test = test.split("::" )[0]
    if test.startswith("tests/models/" ):
        model = test.split("/" )[2]
    else:
        model = None
    return model
def reduce_by_model( logs , error_filter=None ):
    logs = [(x[0], x[1], get_model(x[2] )) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test] )
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values() )
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def make_github_table( reduced_by_error ):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line )
    return "\n".join(lines )
def make_github_table_per_model( reduced_by_model ):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items() )[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line )
    return "\n".join(lines )
if __name__ == "__main__":
__A : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
__A : Union[str, Any] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
                index = k.find(' / ')
                k = k[index + len(' / ') :]
                job_links[k] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
    counter = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
    most_common = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    sa = make_github_table(reduced_by_error)
    sa_per_model = make_github_table_per_model(reduced_by_model)
    with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(sa)
    with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(sa_per_model)
| 16 | 0
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
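# 2x2 inverse via the adjugate formula; 3x3 via the Sarrus-rule determinant plus the cofactor/adjoint construction,
# computed in Decimal to limit floating-point rounding error.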
def _SCREAMING_SNAKE_CASE ( matrix : list[list[float]] ):
    d = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
    if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
        if determinant == 0:
            raise ValueError('This matrix has no inverse.' )
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0] , swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0] , swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix ) == 3
        and len(matrix[0] ) == 3
        and len(matrix[1] ) == 3
        and len(matrix[2] ) == 3
    ):
# Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
        cofactor_matrix = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
            d(matrix[1][2] ) * d(matrix[2][1] )
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
            d(matrix[1][1] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
            d(matrix[0][2] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
            d(matrix[0][2] ) * d(matrix[1][1] )
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
            d(matrix[0][1] ) * d(matrix[1][0] )
        )
# Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
        # Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
| 107 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : Optional[Any] = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class _SCREAMING_SNAKE_CASE ( PretrainedConfig ):
'''simple docstring'''
lowerCamelCase__ = "gpt_neox"
    def __init__( self , vocab_size=50432 , hidden_size=6144 , num_hidden_layers=44 , num_attention_heads=64 , intermediate_size=24576 , hidden_act="gelu" , rotary_pct=0.25 , rotary_emb_base=10000 , attention_dropout=0.0 , hidden_dropout=0.0 , classifier_dropout=0.1 , max_position_embeddings=2048 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=0 , eos_token_id=2 , tie_word_embeddings=False , use_parallel_residual=True , rope_scaling=None , **kwargs , ):
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size is not divisble by the number of attention heads! Make sure to update them!" )
def _snake_case ( self : Union[str, Any] ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __lowerCamelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
f"got {self.rope_scaling}" )
SCREAMING_SNAKE_CASE = self.rope_scaling.get("type" , __lowerCamelCase )
SCREAMING_SNAKE_CASE = self.rope_scaling.get("factor" , __lowerCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
| 16 | 0
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
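# Lazy import structure: heavy torch/vision submodules are only imported on first attribute access via _LazyModule.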
_lowerCAmelCase : List[str] = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[Any] = ['MaskFormerFeatureExtractor']
_lowerCAmelCase : Optional[Any] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Any = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
_lowerCAmelCase : str = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 246 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : List[Any] = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    __A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 | 0
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 23 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : Union[str, Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__A : List[Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key( state_dict , old , new ):
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
return new_state_dict
def read_in_q_k_v( state_dict , is_panoptic=False ):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE = in_proj_weight[:256, :]
SCREAMING_SNAKE_CASE = in_proj_bias[:256]
SCREAMING_SNAKE_CASE = in_proj_weight[256:512, :]
SCREAMING_SNAKE_CASE = in_proj_bias[256:512]
SCREAMING_SNAKE_CASE = in_proj_weight[-256:, :]
SCREAMING_SNAKE_CASE = in_proj_bias[-256:]
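# The conversion is sanity-checked on the standard COCO val2017 image 000000039769 (two cats on a couch).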
def prepare_img( ):
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint( model_name , pytorch_dump_folder_path ):
SCREAMING_SNAKE_CASE = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
SCREAMING_SNAKE_CASE = "resnet101"
if "dc5" in model_name:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = "panoptic" in model_name
if is_panoptic:
SCREAMING_SNAKE_CASE = 250
else:
SCREAMING_SNAKE_CASE = 91
SCREAMING_SNAKE_CASE = "huggingface/label-files"
SCREAMING_SNAKE_CASE = "coco-detection-id2label.json"
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(A__ , A__ , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE = {int(A__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
# load image processor
SCREAMING_SNAKE_CASE = "coco_panoptic" if is_panoptic else "coco_detection"
SCREAMING_SNAKE_CASE = ConditionalDetrImageProcessor(format=A__ )
# prepare image
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=A__ , return_tensors="pt" )
SCREAMING_SNAKE_CASE = encoding["pixel_values"]
logger.info(F"Converting model {model_name}..." )
# load original model from torch hub
SCREAMING_SNAKE_CASE = torch.hub.load("DeppMeng/ConditionalDETR" , A__ , pretrained=A__ ).eval()
SCREAMING_SNAKE_CASE = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
SCREAMING_SNAKE_CASE = "conditional_detr." + src
rename_key(A__ , A__ , A__ )
SCREAMING_SNAKE_CASE = rename_backbone_keys(A__ )
# query, key and value matrices need special treatment
read_in_q_k_v(A__ , is_panoptic=A__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
SCREAMING_SNAKE_CASE = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
# finally, create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE = ConditionalDetrForSegmentation(A__ ) if is_panoptic else ConditionalDetrForObjectDetection(A__ )
model.load_state_dict(A__ )
model.eval()
model.push_to_hub(repo_id=A__ , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
SCREAMING_SNAKE_CASE = conditional_detr(A__ )
SCREAMING_SNAKE_CASE = model(A__ )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
image_processor.save_pretrained(A__ )
if __name__ == "__main__":
__A : str = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__A : int = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 16 | 0
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
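# Interactive questionnaire that builds an Accelerate config (local machine or SageMaker) and writes it to YAML or JSON.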
description = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'
def get_user_input( ):
    '''simple docstring'''
    compute_environment = _ask_options(
        """In which compute environment are you running?""", ["""This machine""", """AWS (Amazon SageMaker)"""], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
return config
def config_command_parser( subparsers=None ):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser("""config""", description=description )
    else:
        parser = argparse.ArgumentParser("""Accelerate config command""", description=description )
parser.add_argument(
"""--config_file""", default=A__, help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
), )
    if subparsers is not None:
        parser.set_defaults(func=config_command )
return parser
def config_command( args ):
    '''simple docstring'''
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
if config_file.endswith(""".json""" ):
        config.to_json_file(config_file )
else:
        config.to_yaml_file(config_file )
print(f'''accelerate configuration saved at {config_file}''' )
def main( ):
    '''simple docstring'''
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
| 529 |
from __future__ import annotations
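# Generate every permutation by depth-first backtracking over a per-index "used" flag array:
# place each unused element, recurse on the next position, then undo the choice.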
def generate_all_permutations( sequence : list[int | str] ):
    create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )
def create_state_space_tree( sequence : list[int | str] , current_sequence : list[int | str] , index : int , index_used : list[int] , ):
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] = False
sequence : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 16 | 0
import argparse
import torch
from transformers import (
    UniSpeechSatConfig,
    UniSpeechSatForAudioFrameClassification,
    UniSpeechSatForSequenceClassification,
    UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
    logging,
)
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
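# Map an S3PRL downstream checkpoint onto the matching HF UniSpeechSat head
# (sequence classification, frame classification/diarization, or x-vector).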
def convert_classification( base_model_name , hf_config , downstream_dict ):
    '''simple docstring'''
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization( base_model_name , hf_config , downstream_dict ):
    '''simple docstring'''
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector( base_model_name , hf_config , downstream_dict ):
    '''simple docstring'''
    model = UniSpeechSatForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint( base_model_name , config_path , checkpoint_path , model_dump_path ):
    '''simple docstring'''
    checkpoint = torch.load(checkpoint_path , map_location="cpu" )
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path )
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification" ):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("ForAudioFrameClassification" ):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("ForXVector" ):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
_snake_case = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 340 |
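# Project Euler problem 1: sum the multiples of 3 or 5 below n. Note that the `elif a % 15 == 0`
# branch below is unreachable, since every multiple of 15 already matches the first condition.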
def solution( n : int = 1000 ):
    a = 3
    result = 0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 15 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
    print(f'{solution() = }')
| 16 | 0
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase=3, lowerCamelCase=32, lowerCamelCase=3, lowerCamelCase=10, lowerCamelCase=[10, 20, 30, 40], lowerCamelCase=[1, 1, 2, 1], lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase="relu", lowerCamelCase=3, lowerCamelCase=None, ) -> int:
"""simple docstring"""
_lowercase : Dict = parent
_lowercase : Union[str, Any] = batch_size
_lowercase : Union[str, Any] = image_size
_lowercase : int = num_channels
_lowercase : Tuple = embeddings_size
_lowercase : List[str] = hidden_sizes
_lowercase : Union[str, Any] = depths
_lowercase : List[Any] = is_training
_lowercase : Optional[Any] = use_labels
_lowercase : int = hidden_act
_lowercase : Optional[Any] = num_labels
_lowercase : str = scope
_lowercase : str = len(__lowerCamelCase)
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowercase : Union[str, Any] = None
if self.use_labels:
_lowercase : int = ids_tensor([self.batch_size], self.num_labels)
_lowercase : Dict = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Tuple:
"""simple docstring"""
_lowercase : Optional[int] = TFResNetModel(config=__lowerCamelCase)
_lowercase : int = model(__lowerCamelCase)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Optional[int] = self.num_labels
_lowercase : List[Any] = TFResNetForImageClassification(__lowerCamelCase)
_lowercase : Optional[int] = model(__lowerCamelCase, labels=__lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase : List[Any] = config_and_inputs
_lowercase : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class _lowerCamelCase( TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
lowercase_ : Any = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowercase_ : Optional[Any] = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowercase_ : Any = False
lowercase_ : Optional[int] = False
lowercase_ : List[Any] = False
lowercase_ : Tuple = False
lowercase_ : Any = False
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : List[Any] = TFResNetModelTester(self)
_lowercase : str = ConfigTester(self, config_class=__lowerCamelCase, has_text_modality=__lowerCamelCase)
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return
@unittest.skip(reason='ResNet does not use inputs_embeds')
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='ResNet does not support input and output embeddings')
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase , _lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Optional[Any] = model_class(__lowerCamelCase)
_lowercase : int = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : int = [*signature.parameters.keys()]
_lowercase : str = ['pixel_values']
self.assertListEqual(arg_names[:1], __lowerCamelCase)
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
        def check_hidden_states_output(inputs_dict, config, model_class):
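            # Expect one hidden state per stage plus the embedding output; the first feature map is downsampled 4x.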
_lowercase : List[Any] = model_class(__lowerCamelCase)
_lowercase : Optional[int] = model(**self._prepare_for_class(__lowerCamelCase, __lowerCamelCase))
_lowercase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowercase : str = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase), expected_num_stages + 1)
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
_lowercase , _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : Union[str, Any] = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowercase : Any = layer_type
_lowercase : Any = True
check_hidden_states_output(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase : Tuple = True
check_hidden_states_output(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase)
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase)
@slow
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Tuple = TFResNetModel.from_pretrained(__lowerCamelCase)
self.assertIsNotNone(__lowerCamelCase)
def prepare_img( ) -> Optional[int]:
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class _lowerCamelCase( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Any = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
_lowercase : Dict = self.default_image_processor
_lowercase : Optional[Any] = prepare_img()
_lowercase : List[str] = image_processor(images=__lowerCamelCase, return_tensors='tf')
# forward pass
_lowercase : Union[str, Any] = model(**__lowerCamelCase)
# verify the logits
_lowercase : Tuple = tf.TensorShape((1, 10_00))
self.assertEqual(outputs.logits.shape, __lowerCamelCase)
_lowercase : Dict = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7])
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), __lowerCamelCase, atol=1E-4))
| 89 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Dict = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    __A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 | 0
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_lowerCAmelCase = logging.get_logger(__name__)
class __UpperCamelCase ( DPTImageProcessor ):
def __init__( self ,*_A ,**_A ):
'''simple docstring'''
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.' ,__lowerCamelCase ,)
super().__init__(*__lowerCamelCase ,**__lowerCamelCase )
| 259 |
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        # round-trip: convert to BetterTransformer, generate, convert back, and check parity
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        # save_pretrained must refuse to serialize a model still in BetterTransformer form
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname) | 16 | 0 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """
    Solve the simultaneous equations matrix * x = vector by Gaussian elimination
    with partial pivoting; returns the solution as a column vector.
    """
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Fit the unique polynomial through (1, y_1), ..., (n, y_n) and return it as a function."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size))

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating polynomial from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the optimum polynomials of degree 0..order-1."""
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret: int = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
| 659 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
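# Editor's note: the tester below follows the standard transformers ModelTester pattern --
# a tiny random config plus dummy inputs so the shared ModelTesterMixin checks stay fast.
# Minimal standalone use (sketch; `case` is any unittest.TestCase instance):
#   tester = SwiftFormerModelTester(case)
#   config, pixel_values, labels = tester.prepare_config_and_inputs()
#   tester.create_and_check_model(config, pixel_values, labels)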
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        # also check the classification head on fresh inputs without labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    # The obfuscated source does not preserve which common test was skipped here;
    # `test_model_is_small` is the usual target of this skip message.
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) | 16 | 0 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f'''{token} {vocab_tokens[token]}\n''')
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
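# Editor's note (assumption about the id layout): PhobertTokenizer reserves the first ids
# for RoBERTa-style specials (<s>=0, <pad>=1, </s>=2, <unk>=3), so the six file tokens
# start at 4 -- hence T@@ -> 4, i -> 5, I -> 6, R@@ -> 7, e@@ -> 9 -- and every subword
# outside the tiny vocab above (ô@@, l@@, à, ...) collapses to <unk> = 3 in the id list.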
| 579 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
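# Editor's usage sketch (values are illustrative): these arguments extend TrainingArguments
# with generation-time knobs consumed by Seq2SeqTrainer, e.g.
#   args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True,
#                                   generation_max_length=128, generation_num_beams=4)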
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """
        Serializes this instance, replacing `GenerationConfig` values by dicts so the result is JSON-safe.
        """
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d | 16 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ) -> None:
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                """The hidden size is not divisible by the number of attention heads! Make sure to update them!""" )

    def _rope_scaling_validation(self) -> None:
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
                f'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get("""type""", None)
        rope_scaling_factor = self.rope_scaling.get("""factor""", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
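# Editor's usage sketch (values are illustrative): the validation above accepts exactly
# this shape of dict --
#   config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
# i.e. a two-field mapping whose type is "linear" or "dynamic" and whose factor is a
# float strictly greater than 1.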
| 607 |
import os
def solution():
    """Returns the first ten digits of the sum of the numbers listed in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution()) | 16 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 175 |
import pytest
DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'
DATASET_LOADING_SCRIPT_CODE = """
import json
import os

import datasets


REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}


class __DummyDataset1__(datasets.GeneratorBasedBuilder):

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                        ]
                    )
                ),
                "langs": datasets.Sequence(datasets.Value("string")),
                "spans": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(URLS)
        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir) | 16 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a BlenderbotSmall model; the defaults
    yield a configuration similar to facebook/blenderbot_small-90M.
    """

    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ] )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
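# Editor's usage sketch (model id taken from the archive map above; the rest is illustrative):
#   from transformers import AutoConfig, AutoTokenizer, TensorType
#   cfg = AutoConfig.from_pretrained("facebook/blenderbot_small-90M")
#   onnx_config = BlenderbotSmallOnnxConfig(cfg, task="seq2seq-lm")
#   tok = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#   dummy = onnx_config.generate_dummy_inputs(tok, framework=TensorType.PYTORCH)
# `dummy` then holds input_ids / attention_mask / decoder_* tensors shaped for ONNX export.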
| 107 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    r"""
    Construct a DPRContextEncoder tokenizer; identical to BertTokenizer, with DPR defaults.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    r"""
    Construct a DPRQuestionEncoder tokenizer; identical to BertTokenizer, with DPR defaults.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)

DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = (
    r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. '
    r'Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
)
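# Editor's usage sketch (example strings are illustrative): the mixin below builds one
# row per passage, so
#   tok = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   enc = tok(questions="What is love?", titles="Haddaway",
#             texts="'What Is Love' is a song...", return_tensors="pt")
# yields enc["input_ids"] of shape (n_passages, sequence_length), ready for a DPR reader.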
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts." )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    ) )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]" )
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}" )
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"] | 16 | 0 |
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """
    Find a root of `function` via the secant method, starting from x0 and x1.
    """
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError('float division by zero, could not find root' )
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
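# Editor's note: for f(x) = x^3 - 2x - 5 with the starting points 3 and 3.5, the secant
# iteration above converges to the classical Wallis root x ~= 2.0945514815; the equality
# guard exists because each update divides by f(x_n1) - f(x_n).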
| 246 |
from typing import Any
import numpy as np
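# Editor's note: the Rayleigh quotient of a Hermitian matrix A and vector v is
# R(A, v) = (v* A v) / (v* v), which is always real for Hermitian A. Worked example for
# the second test case below: A v = [17, 5, 5]^T, v^T (A v) = 42, v^T v = 14, so R = 3.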
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks if a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of a Hermitian matrix `a` and vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests() | 16 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
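# Hedged usage sketch (not part of the upstream module; the checkpoint name is
# illustrative). Any Flax checkpoint whose config type appears in the mappings
# above is resolved to the matching architecture:
#
#     model = FlaxAutoModel.from_pretrained("bert-base-cased")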
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Manhattan distance from this node to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of valid successor nodes reachable from `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retraces the parent chain from `node` back to the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()

    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

    for elem in grid:
        print(elem)
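# Reading the demo output: in the second printout, cells overwritten with 2 mark
# the path that GreedyBestFirst.search() returned from init to goal.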
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    # the vocab file sits next to the checkpoint: strip the trailing "model.ckpt" (10 chars)
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCAmelCase : int = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
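# Hypothetical invocation (file names are placeholders, not part of the original script):
#
#     python convert_tapas_original_tf_checkpoint_to_pytorch.py --task WTQ \
#         --reset_position_index_per_cell \
#         --tf_checkpoint_path ./model.ckpt \
#         --tapas_config_file ./tapas_config.json \
#         --pytorch_dump_path ./tapas_wtq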
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image classification mapping
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = num_frames * self.num_patches_per_frame + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions

                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
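# Note (assumption, not asserted by the test itself): the video is sliced to 8 frames
# above because the k400-finetuned TimeSformer base checkpoint expects num_frames=8;
# the expected logits are tied to that exact checkpoint.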
def kinetic_energy(mass: float, velocity: float) -> float:
    """Compute the kinetic energy 0.5 * m * v^2 of a body; speed enters as |v|."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
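# Worked example: kinetic_energy(10, 10) = 0.5 * 10 * |10| * |10| = 500.0 joules.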
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strips the protocol prefix (e.g. "s3://") from a dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Whether `fs` points to a remote (non-local) filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear fsspec's event-loop/thread references; avoids HTTPFileSystem hangs in training loops."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
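# Minimal sketch of how the helpers above compose (assumes the optional s3fs
# dependency is installed; the bucket name is a placeholder):
#
#     fs = fsspec.filesystem("s3")
#     is_remote_filesystem(fs)                      # -> True
#     extract_path_from_uri("s3://bucket/dataset")  # -> "bucket/dataset"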
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # rewrite the deprecated negative `no_*` flags into their positive counterparts
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")

        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
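# Hedged usage sketch (model name illustrative): deprecated negative flags are
# rewritten in __init__, so no_cuda=True below is equivalent to cuda=False:
#
#     args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], no_cuda=True)
#     args.strategy  # a tf.distribute strategy for the selected device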
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwrite to save the Q-Former tokenizer in a separate folder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwrite to load the Q-Former tokenizer from a separate folder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
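# Hedged usage sketch (checkpoint name assumed): one call tokenizes the prompt with
# both tokenizers and preprocesses the image:
#
#     processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#     inputs = processor(images=image, text="What is shown here?", return_tensors="pt")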
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE with the modified Euler (Heun) predictor-corrector scheme."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
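# Worked example (ODE chosen for illustration): integrating dy/dx = y with y(0) = 1
# over [0, 1] in steps of 0.1; the last entry approximates e ≈ 2.71828:
#
#     y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)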
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config


logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """A map of modules from TF to PyTorch."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = '\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
MOBILENET_V1_INPUTS_DOCSTRING = '\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`MobileNetV1ImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    'The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.',
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    """simple docstring"""

    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True) -> None:
        '''simple docstring'''
        super().__init__(config)
        self.config = config
        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
        self.conv_stem = MobileNetVaConvLayer(
            config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2,
        )
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=strides[i], groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=out_channels, kernel_size=1,
                )
            )
        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune) -> None:
        '''simple docstring'''
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        '''simple docstring'''
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        hidden_states = self.conv_stem(pixel_values)
        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        last_hidden_state = hidden_states
        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    '\n    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ',
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    """simple docstring"""

    def __init__(self, config: MobileNetVaConfig) -> None:
        '''simple docstring'''
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)
        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output))
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
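# --- Added usage sketch (not part of the original module) ----------------------
# Assumes the "google/mobilenet_v1_1.0_224" checkpoint and the AutoImageProcessor
# API; upstream transformers exposes this class as MobileNetV1ForImageClassification.
if __name__ == "__main__":
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor

    processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
    model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
    inputs = processor(images=Image.open("cat.jpg"), return_tensors="pt")  # any RGB image
    with torch.no_grad():
        logits = model(**inputs).logits
    # Highest-scoring ImageNet class for the image
    print(model.config.id2label[logits.argmax(-1).item()])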
def __a(input_num: int) -> int:
    """Return the sum of the proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
    doctest.testmod()
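    # Added sanity checks: a number is "perfect" when it equals the sum of its
    # proper divisors, which is exactly what __a computes.
    for n in (6, 28, 496):
        assert __a(n) == n  # e.g. 6 = 1 + 2 + 3
    assert __a(12) == 16  # 1 + 2 + 3 + 4 + 6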
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlm-roberta-base': 512,
'xlm-roberta-large': 512,
'xlm-roberta-large-finetuned-conll02-dutch': 512,
'xlm-roberta-large-finetuned-conll02-spanish': 512,
'xlm-roberta-large-finetuned-conll03-english': 512,
'xlm-roberta-large-finetuned-conll03-german': 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
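# --- Added usage sketch (not part of the original module) ----------------------
# Assumes network access to the "xlm-roberta-base" checkpoint listed above.
if __name__ == "__main__":
    tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    ids = tokenizer("Hello world!")["input_ids"]
    print(ids)                    # <s> ... </s>, with the fairseq offset applied
    print(tokenizer.decode(ids))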
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {'UserAgent': UserAgent().random}


def extract_user_profile(script) -> dict:
    """May raise json.decoder.JSONDecodeError"""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    '''simple docstring'''

    def __init__(self, username: str):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Return a dict of user information"""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
def __repr__( self : Union[str, Any] ):
return f"{self.__class__.__name__}('{self.username}')"
def __str__( self : str ):
return f"{self.fullname} ({self.username}) is {self.biography}"
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser('github')
print(instagram_user)
print(f'{instagram_user.number_of_posts = }')
print(f'{instagram_user.number_of_followers = }')
print(f'{instagram_user.number_of_followings = }')
print(f'{instagram_user.email = }')
print(f'{instagram_user.website = }')
print(f'{instagram_user.profile_picture_url = }')
print(f'{instagram_user.is_verified = }')
    print(f'{instagram_user.is_private = }')
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
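# --- Added illustration (not part of the original module) ----------------------
# With the lazy module in place, importing the package is cheap; the heavy torch/TF
# submodules are imported only when one of their attributes is first accessed:
#
#     from transformers.models.xlm import XLMConfig   # resolves configuration_xlm only
#     from transformers.models.xlm import XLMModel    # triggers the torch import path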
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
    'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
    'tokenizer_config_file': {
        'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BlenderbotTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
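# --- Added usage sketch (not part of the original module) ----------------------
# Assumes network access to the "facebook/blenderbot-3B" checkpoint listed above.
if __name__ == "__main__":
    tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
    ids = tokenizer(" Hello, how are you?")["input_ids"]
    print(ids)                    # ends with the eos id added by build_inputs_with_special_tokens
    print(tokenizer.decode(ids))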
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
'jukebox': 512,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        '''simple docstring'''
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file, encoding='utf-8') as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)
        with open(genres_file, encoding='utf-8') as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)
        with open(lyrics_file, encoding='utf-8') as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)
        oov = r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r'\-\'', r'\-+\'')
        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size(self):
        '''simple docstring'''
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        # Merge the three sub-vocabularies; dict(a, b, c) is not valid, so unpack them instead.
        return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}

    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        '''simple docstring'''
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids
    def _tokenize(self, lyrics):
        '''simple docstring'''
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        '''simple docstring'''
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics
    def prepare_for_tokenization(self, artists, genres, lyrics, is_split_into_words=False):
        '''simple docstring'''
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + '.v2'
                genres[idx] = [
                    self._normalize(genre) + '.v2' for genre in genres[idx].split('_')
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+')
            vocab = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab['<unk>'] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ''
        else:
            self.out_of_vocab = regex.compile(r'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+')
        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace('\\', '\n')
        lyrics = self.out_of_vocab.sub('', lyrics), [], []
        return artists, genres, lyrics
    def _run_strip_accents(self, text):
        '''simple docstring'''
        text = unicodedata.normalize('NFD', text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text: str) -> str:
        '''simple docstring'''
        accepted = (
            [chr(i) for i in range(ord('a'), ord('z') + 1)]
            + [chr(i) for i in range(ord('A'), ord('Z') + 1)]
            + [chr(i) for i in range(ord('0'), ord('9') + 1)]
            + ['.']
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r'_+')
        text = ''.join([c if c in accepted else '_' for c in text.lower()])
        text = pattern.sub('_', text).strip('_')
        return text

    def convert_lyric_tokens_to_string(self, lyrics) -> str:
        '''simple docstring'''
        return " ".join(lyrics)
    def convert_to_tensors(self, inputs, tensor_type=None, prepend_batch_axis=False):
        '''simple docstring'''
        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.'
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.')
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.')
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                'Unable to create tensor, you should probably activate truncation and/or padding '
                'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.'
            )
        return inputs
    def __call__(self, artist, genres, lyrics="", return_tensors="pt") -> BatchEncoding:
        '''simple docstring'''
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)
        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)
        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
            )
            for i in range(len(self.version))
        ]
        return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks})
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        artists_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file']
        )
        with open(artists_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))
        genres_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file']
        )
        with open(genres_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))
        lyrics_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file']
        )
        with open(lyrics_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))
        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        '''simple docstring'''
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
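# --- Added usage sketch (not part of the original module) ----------------------
# Assumes the "openai/jukebox-1b-lyrics" checkpoint; the tokenizer returns one
# (artist, genre, lyrics) token sequence per prior level.
if __name__ == "__main__":
    tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
    encoded = tokenizer("Alan Jackson", "Country Rock", lyrics="old town road")
    print([t.shape for t in encoded["input_ids"]])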
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
    url = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + F"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return job_links
    except Exception:
        print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
    url = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + F"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        return artifacts
    except Exception:
        print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact from a URL (which requires a redirect)."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, F"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            F"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            F"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count the occurrences of each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
def reduce_by_model(logs, error_filter=None):
    """Group the errors by model and count them."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = F"| {count} | {error[:100]} |  |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = F"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    parser.add_argument(
        '--output_dir',
        type=str,
        required=True,
        help='Where to store the downloaded artifacts and other result files.',
    )
    parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(' / ')
                k = k[index + len(' / ') :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(s2)
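# --- Added invocation example (not part of the original script) -----------------
# Typical run against a finished workflow; the script filename and run id below are
# placeholders, and the token only needs `actions:read` permission:
#
#     python get_ci_error_statistics.py \
#         --workflow_run_id 1234567890 \
#         --output_dir ci_reports \
#         --token $GITHUB_TOKEN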
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    """Format `t` (in seconds) to (h):mm:ss."""
    t = int(t)
    h, m, s = t // 3_6_0_0, (t // 6_0) % 6_0, t % 6_0
    return F'{h}:{m:02d}:{s:02d}' if h != 0 else F'{m:02d}:{s:02d}'


def html_progress_bar(value, total, prefix, label, width=3_0_0):
    # docstyle-ignore
    return F'\n    <div>\n      {prefix}\n      <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>\n      {label}\n    </div>\n    '


def text_to_html_table(items):
    """Put the texts in `items` in an HTML table."""
    html_code = '<table border=\"1\" class=\"dataframe\">\n'
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += F'    <th>{i}</th>\n'
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = F'{elt:.6f}' if isinstance(elt, float) else str(elt)
            html_code += F'      <td>{elt}</td>\n'
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    """simple docstring"""

    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 3_00,
    ) -> None:
        self.total = total
        self.prefix = '' if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value: int, force_update: bool = False, comment: str = None) -> None:
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None) -> None:
        spaced_value = ' ' * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f'[{spaced_value}/{self.total} : < :'
        elif self.predicted_remaining is None:
            self.label = f'[{spaced_value}/{self.total} {format_time(self.elapsed_time)}'
        else:
            self.label = (
                f'[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <'
                f' {format_time(self.predicted_remaining)}'
            )
            self.label += f', {1/self.average_time_per_item:.2f} it/s'
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f', {self.comment}]'
        self.display()

    def display(self) -> None:
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self) -> None:
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(''))
class NotebookTrainingTracker(NotebookProgressBar):
    """simple docstring"""

    def __init__(self, num_steps, column_names=None) -> None:
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self) -> None:
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values) -> None:
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=3_00):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self) -> None:
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    """simple docstring"""

    def __init__(self) -> None:
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ['Training Loss']
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append('Validation Loss')
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f'{state.epoch:.2f}'
        self.training_tracker.update(
            state.global_step + 1, comment=f'Epoch {epoch}/{state.num_train_epochs}', force_update=self._force_next_update
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {'Training Loss': logs['loss']}
            # First column is necessarily Step since we're not in epoch eval strategy
            values['Step'] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values['Training Loss'] = log['loss']
                    break
            if self.first_column == "Epoch":
                values['Epoch'] = int(state.epoch)
            else:
                values['Step'] = state.global_step
            metric_key_prefix = 'eval'
            for k in metrics:
                if k.endswith('_loss'):
                    metric_key_prefix = re.sub(r'\_loss$', '', k)
            _ = metrics.pop('total_flos', None)
            _ = metrics.pop('epoch', None)
            _ = metrics.pop(f'{metric_key_prefix}_runtime', None)
            _ = metrics.pop(f'{metric_key_prefix}_samples_per_second', None)
            _ = metrics.pop(f'{metric_key_prefix}_steps_per_second', None)
            _ = metrics.pop(f'{metric_key_prefix}_jit_compilation_time', None)
            for k, v in metrics.items():
                if k == f'{metric_key_prefix}_loss':
                    values['Validation Loss'] = v
                else:
                    splits = k.split('_')
                    name = ' '.join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step, comment=f'Epoch {int(state.epoch)}/{state.num_train_epochs}', force_update=True
        )
        self.training_tracker = None
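# --- Added usage sketch (not part of the original module) ----------------------
# Trainer enables this callback automatically inside a notebook, but it can also be
# attached explicitly; `model`, `train_ds` and `eval_ds` are placeholders.
#
#     from transformers import Trainer, TrainingArguments
#
#     trainer = Trainer(
#         model=model,
#         args=TrainingArguments("out", evaluation_strategy="epoch"),
#         train_dataset=train_ds,
#         eval_dataset=eval_ds,
#         callbacks=[NotebookProgressCallback()],
#     )
#     trainer.train()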
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )
def _snake_case ( self : Union[str, Any] ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __lowerCamelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
f"got {self.rope_scaling}" )
SCREAMING_SNAKE_CASE = self.rope_scaling.get("type" , __lowerCamelCase )
SCREAMING_SNAKE_CASE = self.rope_scaling.get("factor" , __lowerCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(__lowerCamelCase , __lowerCamelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" ) | 16 | 0 |
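# Quick sketch exercising the `_rope_scaling_validation` check defined above;
# it assumes the GPTNeoXConfig class from this snippet is in scope.
cfg = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation

try:
    GPTNeoXConfig(rope_scaling={"type": "cubic", "factor": 2.0})
except ValueError as err:
    print(err)  # `rope_scaling`'s type field must be one of ['linear', 'dynamic'], ...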
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int], Callable[[int], bool]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
| 246 |
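# Usage sketch: the builder above is what `datasets.load_dataset("csv", ...)`
# dispatches to; extra keyword arguments are collected into CsvConfig and then
# forwarded to pandas.read_csv. The file name is a placeholder.
import datasets

ds = datasets.load_dataset(
    "csv",
    data_files={"train": "my_table.csv"},
    sep=";",        # ends up in CsvConfig.sep and then in pd_read_csv_kwargs
    skiprows=1,
)
print(ds["train"].features)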
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 16 | 0 |
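# Minimal sketch of the `_LazyModule` pattern above: the package module is
# replaced by a proxy, and heavy submodules are only imported when one of
# their attributes is first accessed.
import transformers

config = transformers.GitConfig()     # first access triggers the real submodule import
print(type(transformers.models.git))  # the proxy module object itself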
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )
    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )
    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight'))
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias'))
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight'))
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias'))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"""))
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"""))
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"""))
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"""))
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"""))
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"""))
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"""))
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"""))
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"""))
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"""))
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight"""))
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias"""))
if i < 3:
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.reduction.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight"""))
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight"""))
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.bias""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias"""))
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight'))
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias'))
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight'))
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias'))
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight'))
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias'))
# transformer encoder
for i in range(config.encoder_layers):
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", f"""model.encoder.layers.{i}.self_attn.attention_weights.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", f"""model.encoder.layers.{i}.self_attn.attention_weights.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", f"""model.encoder.layers.{i}.self_attn.value_proj.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", f"""model.encoder.layers.{i}.self_attn.value_proj.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", f"""model.encoder.layers.{i}.self_attn.output_proj.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", f"""model.encoder.layers.{i}.self_attn.output_proj.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.weight""", f"""model.encoder.layers.{i}.self_attn_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""model.encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""model.encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""model.encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""model.encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""model.encoder.layers.{i}.fc2.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""model.encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""model.encoder.layers.{i}.final_layer_norm.bias"""))
# transformer decoder
for i in range(config.decoder_layers):
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.weight""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""model.decoder.layers.{i}.self_attn.out_proj.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""model.decoder.layers.{i}.self_attn.out_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.weight""", f"""model.decoder.layers.{i}.self_attn_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.bias""", f"""model.decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""model.decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""model.decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""model.decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""model.decoder.layers.{i}.fc2.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""model.decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""model.decoder.layers.{i}.final_layer_norm.bias"""))
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format="coco_detection")
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))
    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
type=str,
default="""deta-swin-large""",
choices=["""deta-swin-large""", """deta-swin-large-o365"""],
help="""Name of the model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
help="""Path to the folder to output PyTorch model.""",
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 23 |
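# Tiny numeric sketch of the q/k/v split performed by read_in_decoder_q_k_v
# above: a fused in_proj weight of shape (3*hidden, hidden) is cut into three
# (hidden, hidden) blocks, in query/key/value order.
import torch

hidden = 4
in_proj_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q = in_proj_weight[:hidden, :]
k = in_proj_weight[hidden : hidden * 2, :]
v = in_proj_weight[-hidden:, :]
assert torch.equal(torch.cat([q, k, v]), in_proj_weight)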
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 16 | 0 |
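# Direct-call equivalent of the argparse entry point above; the dump folder
# path is a placeholder.
convert_conditional_detr_checkpoint(
    model_name="conditional_detr_resnet50",
    pytorch_dump_folder_path="./conditional_detr_resnet50",
)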
def a__ ( A_, A_ = " " ):
'''simple docstring'''
__magic_name__ = []
__magic_name__ = 0
for index, char in enumerate(A__ ):
if char == separator:
split_words.append(string[last_index:index] )
__magic_name__ = index + 1
elif index + 1 == len(A__ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 529 |
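# Usage sketch for split_words above. It behaves like str.split for a single
# one-character separator, except that a trailing separator does not produce
# a trailing empty field.
print(split_words("Hello world!"))  # ['Hello', 'world!']
print(split_words("a,b,c", ","))    # ['a', 'b', 'c']
print(split_words("a b "))          # ['a', 'b']  ("a b ".split(" ") gives ['a', 'b', ''])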
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
| 16 | 0 |
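# Cross-check sketch: the backtracking above enumerates exactly the n!
# orderings that itertools.permutations yields.
from itertools import permutations

expected = [list(p) for p in permutations([3, 1, 2, 4])]
print(len(expected))  # 24, the number of lines generate_all_permutations prints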
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 340 |
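# Usage sketch for the config above together with the matching backbone model;
# assumes a transformers version that ships BitModel (weights are randomly
# initialized here, nothing is downloaded).
from transformers import BitConfig, BitModel

config = BitConfig(layer_type="bottleneck", out_features=["stage1", "stage4"])
model = BitModel(config)
print(config.out_features, config.out_indices)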
def solution(n: int = 1000) -> int:
    """Returns the sum of all the multiples of 3 or 5 below n."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:
            # unreachable: any multiple of 15 already matched the branch above
            result -= a
        a += 1
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
| 16 | 0 |
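# Cross-check sketch: the same sum computed in O(1) via inclusion-exclusion
# on arithmetic series instead of the loop above.
def sum_of_multiples_below(n: int, k: int) -> int:
    m = (n - 1) // k  # number of positive multiples of k strictly below n
    return k * m * (m + 1) // 2


closed_form = (
    sum_of_multiples_below(1000, 3)
    + sum_of_multiples_below(1000, 5)
    - sum_of_multiples_below(1000, 15)
)
print(closed_form)  # 233168, matching solution()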
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    graph = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is smaller than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 |
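# Usage sketch for the generators above: seeding makes the random graph
# reproducible; complete_graph needs no randomness.
import random

random.seed(0)
print(random_graph(5, 0.5))  # adjacency lists; symmetric because directed=False
print(complete_graph(3))     # {0: [1, 2], 1: [0, 2], 2: [0, 1]}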
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 16 | 0 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )
def __lowerCamelCase ( self ,_A = "auto" ):
'''simple docstring'''
if slice_size == "auto":
_lowerCAmelCase : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__lowerCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.enable_attention_slicing(__lowerCamelCase )
    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(
            predicted_ids, skip_special_tokens=True, normalize=True
        )[0]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 259 |
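# Usage sketch, loosely following the diffusers community example for this
# pipeline. Checkpoint names and the audio array are placeholders, and
# `custom_pipeline="speech_to_image_diffusion"` assumes the community pipeline
# registry; adapt as needed.
import numpy as np
import torch
from diffusers import DiffusionPipeline
from transformers import WhisperForConditionalGeneration, WhisperProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"
speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to(device)
speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="speech_to_image_diffusion",
    speech_model=speech_model,
    speech_processor=speech_processor,
).to(device)

audio = np.zeros(16_000, dtype=np.float32)  # stand-in for one second of 16 kHz speech
image = pipe(audio, sampling_rate=16_000).images[0]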
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = "hf-internal-testing/tiny-random-t5"
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer("This is me" , return_tensors="pt" )
SCREAMING_SNAKE_CASE = model.to_bettertransformer()
self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
SCREAMING_SNAKE_CASE = model.generate(**__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.reverse_bettertransformer()
self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCamelCase )
            SCREAMING_SNAKE_CASE = AutoModelForSeq2SeqLM.from_pretrained(__lowerCamelCase )
self.assertFalse(
any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
SCREAMING_SNAKE_CASE = model_reloaded.generate(**__lowerCamelCase )
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase ) )
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = "hf-internal-testing/tiny-random-t5"
        SCREAMING_SNAKE_CASE = AutoModelForSeq2SeqLM.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__lowerCamelCase ):
model.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.reverse_bettertransformer()
            model.save_pretrained(__lowerCamelCase )
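# Hedged usage sketch of the round-trip exercised above (the model object and
# save directory are caller-supplied placeholders): transform for fast
# inference, then reverse before serializing, since saving a still-transformed
# model raises, as the second test shows.
def _bettertransformer_roundtrip_demo(model, save_dir):
    model = model.to_bettertransformer()  # swap in fused attention kernels
    # ... run generation / inference here ...
    model = model.reverse_bettertransformer()  # restore the vanilla modules
    model.save_pretrained(save_dir)  # safe to serialize again
    return model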
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
_lowercase = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
_lowercase = {
'vinai/phobert-base': 256,
'vinai/phobert-large': 256,
}
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : List[str] = set()
lowerCAmelCase_ : Optional[int] = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
lowerCAmelCase_ : Optional[Any] = char
lowerCAmelCase_ : Union[str, Any] = set(A__)
return pairs
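# Hedged example of the pair extraction above: for the BPE symbol sequence
# ("l", "o", "w", "</w>") the result is exactly the set of adjacent bigrams.
def _get_pairs_demo():
    word = ("l", "o", "w", "</w>")
    pairs = set(zip(word, word[1:]))  # same pairs the function above collects
    assert pairs == {("l", "o"), ("o", "w"), ("w", "</w>")}
    return pairs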
class __snake_case ( __snake_case ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[str] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Any ,lowerCAmelCase__ : List[str]="<s>" ,lowerCAmelCase__ : int="</s>" ,lowerCAmelCase__ : Dict="</s>" ,lowerCAmelCase__ : Tuple="<s>" ,lowerCAmelCase__ : List[str]="<unk>" ,lowerCAmelCase__ : Optional[Any]="<pad>" ,lowerCAmelCase__ : Dict="<mask>" ,**lowerCAmelCase__ : Optional[Any] ,) -> Optional[Any]:
'''simple docstring'''
super().__init__(
bos_token=__lowerCamelCase ,eos_token=__lowerCamelCase ,unk_token=__lowerCamelCase ,sep_token=__lowerCamelCase ,cls_token=__lowerCamelCase ,pad_token=__lowerCamelCase ,mask_token=__lowerCamelCase ,**__lowerCamelCase ,)
lowerCAmelCase_ : List[str] = vocab_file
lowerCAmelCase_ : List[Any] = merges_file
lowerCAmelCase_ : Dict = {}
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : Optional[int] = 1
lowerCAmelCase_ : List[str] = 2
lowerCAmelCase_ : Any = 3
self.add_from_file(__lowerCamelCase )
lowerCAmelCase_ : Tuple = {v: k for k, v in self.encoder.items()}
with open(__lowerCamelCase ,encoding="utf-8" ) as merges_handle:
lowerCAmelCase_ : List[Any] = merges_handle.read().split("\n" )[:-1]
lowerCAmelCase_ : Optional[int] = [tuple(merge.split()[:-1] ) for merge in merges]
lowerCAmelCase_ : Optional[Any] = dict(zip(__lowerCamelCase ,range(len(__lowerCamelCase ) ) ) )
lowerCAmelCase_ : Dict = {}
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> int:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase_ : Union[str, Any] = [self.cls_token_id]
lowerCAmelCase_ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
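        # Resulting layout (RoBERTa-style, as built above):
        #   single sequence: <s> A </s>
        #   pair:            <s> A </s></s> B </s>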
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ,lowerCAmelCase__ : bool = False ) -> Optional[Any]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase ,token_ids_a=__lowerCamelCase ,already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = [self.sep_token_id]
lowerCAmelCase_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase_ ( self : str ) -> Dict:
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : List[str] ) -> int:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCAmelCase_ : Optional[Any] = tuple(__lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
lowerCAmelCase_ : List[Any] = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
lowerCAmelCase_ : Optional[int] = min(__lowerCamelCase ,key=lambda lowerCAmelCase__ : self.bpe_ranks.get(__lowerCamelCase ,float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase_ , lowerCAmelCase_ : str = bigram
lowerCAmelCase_ : Any = []
lowerCAmelCase_ : str = 0
while i < len(__lowerCamelCase ):
try:
lowerCAmelCase_ : Any = word.index(__lowerCamelCase ,__lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase_ : Optional[int] = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase_ : Dict = tuple(__lowerCamelCase )
lowerCAmelCase_ : Optional[int] = new_word
if len(__lowerCamelCase ) == 1:
break
else:
lowerCAmelCase_ : Tuple = get_pairs(__lowerCamelCase )
lowerCAmelCase_ : Optional[int] = "@@ ".join(__lowerCamelCase )
lowerCAmelCase_ : Tuple = word[:-4]
lowerCAmelCase_ : str = word
return word
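        # Worked example (hedged): if the only known merge is ("l", "o"), then
        # "low" becomes the symbols ("lo", "w</w>"), is joined to "lo@@ w</w>",
        # and the trailing "</w>" is stripped, giving "lo@@ w"; "@@ " marks a
        # non-final subword piece.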
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : List[str] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Any = []
lowerCAmelCase_ : Optional[int] = re.findall(R"\S+\n?" ,__lowerCamelCase )
for token in words:
split_tokens.extend(list(self.bpe(__lowerCamelCase ).split(" " ) ) )
return split_tokens
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : str ) -> Optional[int]:
'''simple docstring'''
return self.encoder.get(__lowerCamelCase ,self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : List[Any] ) -> Tuple:
'''simple docstring'''
return self.decoder.get(__lowerCamelCase ,self.unk_token )
def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : Optional[Any] ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = " ".join(__lowerCamelCase ).replace("@@ " ,"" ).strip()
return out_string
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> List[Any]:
'''simple docstring'''
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase_ : Optional[Any] = os.path.join(
__lowerCamelCase ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : List[Any] = os.path.join(
__lowerCamelCase ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file ,__lowerCamelCase )
if os.path.abspath(self.merges_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.merges_file ,__lowerCamelCase )
return out_vocab_file, out_merge_file
def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : Optional[int] ) -> List[str]:
'''simple docstring'''
if isinstance(__lowerCamelCase ,__lowerCamelCase ):
try:
with open(__lowerCamelCase ,"r" ,encoding="utf-8" ) as fd:
self.add_from_file(__lowerCamelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
return
lowerCAmelCase_ : Any = f.readlines()
for lineTmp in lines:
lowerCAmelCase_ : List[Any] = lineTmp.strip()
lowerCAmelCase_ : List[Any] = line.rfind(" " )
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
lowerCAmelCase_ : Any = line[:idx]
lowerCAmelCase_ : Union[str, Any] = len(self.encoder )
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : int=13 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : str=True , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[int]=224 , __lowerCamelCase : Any=1000 , __lowerCamelCase : Optional[Any]=[3, 3, 6, 4] , __lowerCamelCase : List[Any]=[48, 56, 112, 220] , ):
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = layer_depths
SCREAMING_SNAKE_CASE = embed_dims
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : Dict ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__lowerCamelCase , layer_scale_init_value=1e-5 , )
def _snake_case ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = SwiftFormerModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
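        # With image_size=224 the overall downsampling factor is 32 (a stride-4
        # patch stem plus three effective stride-2 reductions), so the final
        # feature map is 224 // 32 = 7, matching the (batch, 220, 7, 7) check.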
def _snake_case ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : int ):
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCamelCase__ = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(
self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _snake_case ( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def _snake_case ( self : Optional[int] ):
pass
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _snake_case ( self : Tuple ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = SwiftFormerModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def _snake_case ( self : Union[str, Any] ):
pass
def _snake_case ( self : Optional[Any] ):
def check_hidden_states_output(__lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Tuple ):
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = 8
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(__lowerCamelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : List[Any] ):
def _config_zero_init(__lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = copy.deepcopy(__lowerCamelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(__lowerCamelCase , __lowerCamelCase , 1e-10 )
if isinstance(getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = _config_zero_init(getattr(__lowerCamelCase , __lowerCamelCase ) )
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return configs_no_init
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = _config_zero_init(__lowerCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(config=__lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self : str ):
pass
def __a ( ):
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : List[str] ):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
        SCREAMING_SNAKE_CASE = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]] ).to(__lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
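# Hedged end-to-end usage sketch mirroring the integration test above (the
# checkpoint name comes from the test; everything else is illustrative):
def _swiftformer_inference_demo(image):
    processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
    model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return logits.argmax(-1).item()  # index into the 1000-class head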
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=30 , lowerCAmelCase=2 , lowerCAmelCase=3 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=32 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=37 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=10 , lowerCAmelCase=0.02 , lowerCAmelCase=None , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ = (image_size // patch_size) ** 2
UpperCAmelCase_ = num_patches + 1
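        # e.g. with the defaults above (image_size=30, patch_size=2):
        # (30 // 2) ** 2 = 225 patches, so seq_length = 226 with the [CLS] token.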
def A__ ( self ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def A__ ( self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = ViTMSNModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCAmelCase_ = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = self.type_sequence_label_size
UpperCAmelCase_ = ViTMSNForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCAmelCase_ = model(__lowerCamelCase , labels=__lowerCamelCase )
print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" )
print("Labels: {labels}" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = ViTMSNForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A__ ( self ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : int = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
lowerCAmelCase_ : List[str] = (
{'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : Union[str, Any] = False
def A__ ( self ):
UpperCAmelCase_ = ViTMSNModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def A__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds" )
def A__ ( self ):
pass
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(__lowerCamelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def A__ ( self ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = ViTMSNModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def snake_case__ ( ) -> str:
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self ):
return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None
@slow
def A__ ( self ):
torch.manual_seed(2 )
UpperCAmelCase_ = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(__lowerCamelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**__lowerCamelCase )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
UpperCAmelCase_ = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__A : Optional[Any] = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = field(default=__snake_case , metadata={"help": "Whether to use SortishSampler or not."} )
lowerCamelCase__ = field(
default=__snake_case , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowerCamelCase__ = field(
default=__snake_case , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowerCamelCase__ = field(
default=__snake_case , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowerCamelCase__ = field(
default=__snake_case , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = super().to_dict()
for k, v in d.items():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = v.to_dict()
        return d
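# Hedged usage sketch: the dataclass above mirrors transformers'
# Seq2SeqTrainingArguments, so a typical instantiation would look like the
# following (that upstream name and the field names are assumptions here,
# values illustrative):
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="./out",           # inherited from TrainingArguments
#       predict_with_generate=True,   # evaluate with generate() for ROUGE/BLEU
#       generation_max_length=128,
#       generation_num_beams=4,
#   )
#   serialized = args.to_dict()       # GenerationConfig values become nested dicts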
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class lowerCamelCase__ ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=__lowerCamelCase , )
assert hasattr(self , """env""" )
def _UpperCAmelCase ( self , snake_case ) -> int:
"""simple docstring"""
# configuration for running training on smdistributed Model Parallel
lowercase : Optional[int] = {
"""enabled""": True,
"""processes_per_host""": 8,
}
lowercase : Optional[int] = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
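        # Reading the hypothetical config above: 8 processes per host feed a
        # 4-way pipeline ("partitions": 4), each batch is split into 4
        # microbatches interleaved through the pipeline stages, and DDP is
        # enabled across model-parallel replicas.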
lowercase : Optional[Any] = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
lowercase : Tuple = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=__lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCamelCase , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 5_0_0,
} , metric_definitions=self.env.metric_definitions , distribution=__lowerCamelCase , py_version="""py36""" , )
def _UpperCAmelCase ( self , snake_case ) -> Tuple:
"""simple docstring"""
TrainingJobAnalytics(__lowerCamelCase ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def _UpperCAmelCase ( self , snake_case ) -> Any:
"""simple docstring"""
# create estimator
lowercase : Optional[Any] = self.create_estimator(__lowerCamelCase )
# run training
estimator.fit()
# result dataframe
lowercase : Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowercase : str = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
lowercase : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowercase : Optional[Any] = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __lowerCamelCase )
import os
def solution():
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
    print(solution())
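# Quick sketch of why this works: Python integers have arbitrary precision, so
# the 50-digit addends are summed exactly before truncating to ten digits.
# (The numbers below are made up for illustration.)
def _first_ten_digits_demo():
    total = (10**49 + 1) + 2 * 10**49  # two 50-digit addends
    return str(total)[:10]  # -> "3000000000"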
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    """https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"""
    """position_salaries.csv"""
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polynomial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Polynomial Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
plt.show()
if __name__ == "__main__":
    viz_polynomial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
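# Hedged sketch of what PolynomialFeatures(degree=4) produces for one feature:
# each level x expands to the monomial basis [1, x, x^2, x^3, x^4], and the
# LinearRegression above fits one coefficient per monomial.
def _poly_expansion_demo():
    import numpy as np

    expanded = PolynomialFeatures(degree=4).fit_transform([[2.0]])
    assert np.allclose(expanded, [[1.0, 2.0, 4.0, 8.0, 16.0]])
    return expanded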
import pytest
__A : Optional[Any] = '__dummy_dataset1__'
__A : Optional[int] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / F"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
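# Hedged usage sketch: a test can take the directory fixture and hand it to
# `datasets.load_dataset`, which picks up the `{script_name}.py` written above
# (the test name below is illustrative):
#
#   def test_dummy_dataset_script(dataset_loading_script_dir):
#       import datasets
#       dset = datasets.load_dataset(dataset_loading_script_dir)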
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE ( __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Optional[int] ):
# Initialise PyTorch model
_A = BertConfig.from_json_file(A__ )
print(F'Building PyTorch model from configuration: {config}' )
_A = BertForPreTraining(A__ )
# Load weights from tf checkpoint
load_tf_weights_in_bert(A__ , A__ , A__ )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , A__ )
if __name__ == "__main__":
_UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_UpperCAmelCase : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
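# Example invocation of this script (the script filename is assumed and all
# paths below are placeholders):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin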
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__A : str = logging.get_logger(__name__)
__A : Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__A : Tuple = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__A : Optional[Any] = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__A : str = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
__A : Optional[Any] = {
    'facebook/dpr-ctx_encoder-single-nq-base': 512,
    'facebook/dpr-ctx_encoder-multiset-base': 512,
}
__A : List[str] = {
    'facebook/dpr-question_encoder-single-nq-base': 512,
    'facebook/dpr-question_encoder-multiset-base': 512,
}
__A : Any = {
    'facebook/dpr-reader-single-nq-base': 512,
    'facebook/dpr-reader-multiset-base': 512,
}
__A : str = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
__A : Any = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
__A : Dict = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__A : Optional[int] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
__A : List[Any] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
__A : List[Any] = r'\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    ```\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n    ```\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n            Controls the maximum length to use by one of the truncation/padding parameters.\n\n            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n            is required by one of the truncation/padding parameters. If the model has no specific maximum input\n            length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n            If set, will return tensors instead of list of python integers. Acceptable values are:\n\n            - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n            - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n            - `\'np\'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Returns:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '
@add_start_docstrings(__snake_case )
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __call__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Union[bool, str] = False , __lowerCamelCase : Union[bool, str] = False , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : Optional[bool] = None , **__lowerCamelCase : Any , ):
if titles is None and texts is None:
return super().__call__(
__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE = titles if texts is None else texts
return super().__call__(
__lowerCamelCase , __lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE = titles if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [titles]
SCREAMING_SNAKE_CASE = texts if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [texts]
SCREAMING_SNAKE_CASE = len(__lowerCamelCase )
SCREAMING_SNAKE_CASE = questions if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [questions] * n_passages
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError(
f"There should be as many titles than texts but got {len(__lowerCamelCase )} titles and {len(__lowerCamelCase )} texts." )
SCREAMING_SNAKE_CASE = super().__call__(__lowerCamelCase , __lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase )["input_ids"]
SCREAMING_SNAKE_CASE = super().__call__(__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase )["input_ids"]
SCREAMING_SNAKE_CASE = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__lowerCamelCase , __lowerCamelCase )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE = attention_mask
return self.pad(__lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase )
def _snake_case ( self : Tuple , __lowerCamelCase : BatchEncoding , __lowerCamelCase : DPRReaderOutput , __lowerCamelCase : int = 16 , __lowerCamelCase : int = 64 , __lowerCamelCase : int = 4 , ):
SCREAMING_SNAKE_CASE = reader_input["input_ids"]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = reader_output[:3]
SCREAMING_SNAKE_CASE = len(__lowerCamelCase )
SCREAMING_SNAKE_CASE = sorted(range(__lowerCamelCase ) , reverse=__lowerCamelCase , key=relevance_logits.__getitem__ )
SCREAMING_SNAKE_CASE = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE = len(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__lowerCamelCase , top_spans=__lowerCamelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__lowerCamelCase , start_index=__lowerCamelCase , end_index=__lowerCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(__lowerCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _snake_case ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : List[int] , __lowerCamelCase : int , __lowerCamelCase : int , ):
SCREAMING_SNAKE_CASE = []
for start_index, start_score in enumerate(__lowerCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
SCREAMING_SNAKE_CASE = sorted(__lowerCamelCase , key=lambda __lowerCamelCase : x[1] , reverse=__lowerCamelCase )
SCREAMING_SNAKE_CASE = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]" )
SCREAMING_SNAKE_CASE = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"Span is too long: {length} > {max_answer_length}" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(__lowerCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(__snake_case )
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = READER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__ = ["input_ids", "attention_mask"] | 16 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : str , __snake_case : Dict , __snake_case : List[Any]=7 , __snake_case : Any=3 , __snake_case : Any=30 , __snake_case : Any=400 , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=None , __snake_case : Optional[int]=0.9 , __snake_case : Dict=None , __snake_case : Dict=True , __snake_case : List[Any]=[0.5, 0.5, 0.5] , __snake_case : Dict=[0.5, 0.5, 0.5] , ) -> str:
'''simple docstring'''
lowerCamelCase = size if size is not None else {'shortest_edge': 30}
lowerCamelCase = crop_size if crop_size is not None else {'height': 30, 'width': 30}
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = num_channels
lowerCamelCase = min_resolution
lowerCamelCase = max_resolution
lowerCamelCase = do_resize_and_center_crop
lowerCamelCase = size
lowerCamelCase = crop_pct
lowerCamelCase = crop_size
lowerCamelCase = do_normalize
lowerCamelCase = image_mean
lowerCamelCase = image_std
def lowerCamelCase__ ( self : Dict ) -> Dict:
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
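    # With the defaults above, resizing follows the timm-style crop_pct recipe:
    # the shorter edge is first scaled to roughly size / crop_pct = 30 / 0.9
    # ≈ 33 px, then a 30x30 center crop is taken.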
@require_torch
@require_vision
class lowerCAmelCase ( __snake_case , unittest.TestCase ):
'''simple docstring'''
snake_case = PoolFormerImageProcessor if is_vision_available() else None
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = PoolFormerImageProcessingTester(self )
@property
def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , 'do_resize_and_center_crop' ) )
self.assertTrue(hasattr(__lowerCamelCase , 'size' ) )
self.assertTrue(hasattr(__lowerCamelCase , 'crop_pct' ) )
self.assertTrue(hasattr(__lowerCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(__lowerCamelCase , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing, "image_std"))

    # NOTE: the obfuscated source reused one name for every method below, so later
    # definitions shadowed earlier ones; the names here follow the standard
    # Hugging Face image-processor test conventions.
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 246 |
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    # A matrix is Hermitian when it equals its own conjugate transpose.
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    # Rayleigh quotient (v* a v) / (v* v) of a Hermitian matrix `a` and vector `v`.
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests() | 16 | 0 |
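# Illustrative usage sketch, assuming the names `is_hermitian` and
# `rayleigh_quotient` defined in the row above. For a Hermitian matrix A, the
# Rayleigh quotient (v* A v) / (v* v) is real and bounded below and above by
# the smallest and largest eigenvalues of A.
import numpy as np

a = np.array([[2, 2 + 1j], [2 - 1j, 3]])
v = np.array([[1], [1]])
assert is_hermitian(a)
print(rayleigh_quotient(a, v))  # a real scalar between the extreme eigenvalues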
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)


def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Generates a tuple of dummy DataLoaders to test with."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    # Run a few quick epochs and record one random number per epoch so that a
    # resumed run can be compared against an uninterrupted baseline.
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    "Simple model to do y=mx+b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    # NOTE: every method in the obfuscated source shared one name; the test
    # names below are reconstructed so that no method shadows another.
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        # Only the stateless raw tensors (indices 0 and 1) should be rejected.
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states: with `total_limit=2`, only the two newest survive
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))

    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 23 |
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance from this node to the goal.
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
        for elem in grid:
            print(elem) | 16 | 0 |
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    # Depth-first backtracking: each level of the tree fixes one more element,
    # and the recursion terminates when the whole sequence has been placed.
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 529 |
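# Illustrative usage sketch, assuming the name `generate_all_permutations` from
# the row above. The backtracking tree has n! leaves, so a sequence of length n
# produces n! printed permutations.
generate_all_permutations([1, 2, 3])  # prints the 3! = 6 permutations of [1, 2, 3]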
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image Classification mapping
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
) | 16 | 0 |
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    # Scrape Amazon.in search results for `product` and collect basic listing data.
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # The exact assignment targets of the next two lines were lost in the
        # source; blanking empty price and rating cells is a reconstruction.
        data_frame.loc[data_frame["Current Price of the product"] == "", "Current Price of the product"] = " "
        data_frame.loc[data_frame["Product Rating"] == "", "Product Rating"] = " "
    data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
| 340 |
def kinetic_energy(mass: float, velocity: float) -> float:
    # E_k = 1/2 * m * |v|^2; abs() keeps the result independent of direction.
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True) | 16 | 0 |
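# Illustrative usage sketch, assuming the name `kinetic_energy` from the row
# above. E_k = 1/2 * m * v^2, and the abs() call makes the sign of the velocity
# irrelevant: only the speed matters.
print(kinetic_energy(10, 10))   # 500.0
print(kinetic_energy(10, -10))  # 500.0 as well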
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    # Probability of exactly `successes` hits in `trials` independent
    # Bernoulli trials, each succeeding with probability `prob`.
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
| 89 |
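# Worked example, assuming the name `binomial_distribution` from the row above.
# P(X = 2) for n = 4 trials with p = 0.75 is
# C(4, 2) * 0.75^2 * 0.25^2 = 6 * 0.5625 * 0.0625 = 0.2109375.
assert abs(binomial_distribution(2, 4, 0.75) - 0.2109375) < 1e-12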
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # Remap deprecated `no_*` arguments to their positive counterparts,
        # warning the user about the rename.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0 | 16 | 0 |
"""simple docstring"""
from __future__ import annotations
_lowerCAmelCase = list[tuple[int, int]]
_lowerCAmelCase = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_lowerCAmelCase = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class __UpperCamelCase :
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : int = pos_x
_lowerCAmelCase : str = pos_y
_lowerCAmelCase : Any = (pos_y, pos_x)
_lowerCAmelCase : Optional[Any] = goal_x
_lowerCAmelCase : List[Any] = goal_y
_lowerCAmelCase : str = g_cost
_lowerCAmelCase : Any = parent
_lowerCAmelCase : int = self.calculate_heuristic()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = abs(self.pos_x - self.goal_x )
_lowerCAmelCase : List[Any] = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self ,_A ):
'''simple docstring'''
return self.f_cost < other.f_cost
class __UpperCamelCase :
def __init__( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,0 ,__lowerCamelCase )
_lowerCAmelCase : int = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,9_9999 ,__lowerCamelCase )
_lowerCAmelCase : str = [self.start]
_lowerCAmelCase : int = []
_lowerCAmelCase : str = False
def __lowerCamelCase ( self ):
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_lowerCAmelCase : Dict = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
_lowerCAmelCase : int = True
return self.retrace_path(__lowerCamelCase )
self.closed_nodes.append(__lowerCamelCase )
_lowerCAmelCase : Any = self.get_successors(__lowerCamelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(__lowerCamelCase )
else:
# retrieve the best current path
_lowerCAmelCase : Dict = self.open_nodes.pop(self.open_nodes.index(__lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(__lowerCamelCase )
else:
self.open_nodes.append(__lowerCamelCase )
if not self.reached:
return [self.start.pos]
return None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = []
for action in delta:
_lowerCAmelCase : List[str] = parent.pos_x + action[1]
_lowerCAmelCase : Any = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__lowerCamelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
__lowerCamelCase ,__lowerCamelCase ,self.target.pos_y ,self.target.pos_x ,parent.g_cost + 1 ,__lowerCamelCase ,) )
return successors
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = node
_lowerCAmelCase : Dict = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_lowerCAmelCase : int = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
_lowerCAmelCase = (0, 0)
_lowerCAmelCase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
_lowerCAmelCase = GreedyBestFirst(init, goal)
_lowerCAmelCase = greedy_bf.search()
if path:
for pos_x, pos_y in path:
_lowerCAmelCase = 2
for elem in grid:
print(elem)
| 259 |
from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    # Heun's method (modified Euler): predict with an Euler step, then correct
    # using the average of the slopes at both ends of the interval.
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_get = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_get))
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 16 | 0 |
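# Illustrative usage sketch, assuming the name `euler_modified` from the row
# above. Averaging the start-of-step slope with a predicted end-of-step slope
# gives second-order accuracy. For dy/dx = y with y(0) = 1, y(1) should be
# close to e ≈ 2.71828.
y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(y[-1])  # approximately 2.718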
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    # NOTE: method names reconstructed from the standard diffusers scheduler
    # test layout; the obfuscated source reused a single name for every method.
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        # Use the provided scheduler if given; otherwise build the default one.
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with the same config names
        # gives the same results for the defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 659 |
def sum_of_proper_divisors(input_num: int) -> int:
    # Sum of all proper divisors of `input_num`; the function name is a
    # reconstruction, since the original identifier was lost. No proper
    # divisor can exceed input_num // 2.
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 16 | 0 |
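# Illustrative usage sketch, assuming the name `sum_of_proper_divisors` from
# the row above. A perfect number equals the sum of its proper divisors,
# e.g. 28 = 1 + 2 + 4 + 7 + 14.
assert sum_of_proper_divisors(28) == 28
assert sum_of_proper_divisors(12) == 16  # 1 + 2 + 3 + 4 + 6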
import argparse
import json
from typing import List

from ltp import LTP
from transformers import BertTokenizer


def _is_chinese_char(cp):
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char_code = ord(char)
        if not _is_chinese_char(char_code):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            match_len = min(end - start, max_word_len)
            for i in range(match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
    main(args)
| 579 |
from __future__ import annotations

import json

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Extract the shared-data JSON embedded in a profile page <script> tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    """Fetch and expose the public profile information of an Instagram user."""

    def __init__(self, username: str):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "[email protected]"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
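

def _demo_extract_user_profile() -> None:
    # Editor's offline sketch (synthetic HTML, not part of the original module): shows the input
    # shape extract_user_profile expects -- a <script> tag whose contents embed the page's
    # shared-data JSON object, terminated by a semicolon.
    html = (
        '<script>window._sharedData = {"config": {}, "entry_data": {"ProfilePage": '
        '[{"graphql": {"user": {"username": "demo"}}}]}};</script>'
    )
    script = BeautifulSoup(html, "html.parser").find("script")
    assert extract_user_profile(script)["username"] == "demo"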
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
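
# Editor's sketch (commented out, hypothetical driver, not part of the original file):
# FalconModelTester only needs a "parent" exposing unittest-style assertions, so it can
# also be exercised outside a TestCase:
#
#     class _Parent:
#         def assertEqual(self, a, b):
#             assert a == b
#
#         def assertTrue(self, x):
#             assert x
#
#     tester = FalconModelTester(_Parent())
#     tester.create_and_check_model(*tester.prepare_config_and_inputs())
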
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)

    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_rw_cache_conversion(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )

    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_past_key_values_format(self):
        # Falcon can have different numbers of KV-heads than the number of query heads, so we need
        # to override this test to use the right head counts.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )

        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
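

def _demo_kv_cache_equivalence():
    # Editor's standalone sketch (plain PyTorch, not part of the original test file): the invariant
    # the cache tests above rely on -- causal attention over the full sequence equals
    # token-by-token decoding with cached keys/values.
    torch.manual_seed(0)
    d = 8
    x = torch.randn(1, 5, d)
    wq, wk, wv = torch.randn(d, d), torch.randn(d, d), torch.randn(d, d)

    def attend(q, k, v):
        scores = q @ k.transpose(-2, -1) / d**0.5
        # mask out keys that lie in the future of each query position
        mask = torch.triu(torch.ones(q.shape[1], k.shape[1]), diagonal=k.shape[1] - q.shape[1] + 1).bool()
        scores = scores.masked_fill(mask, float("-inf"))
        return torch.softmax(scores, dim=-1) @ v

    full = attend(x @ wq, x @ wk, x @ wv)  # one pass over the whole sequence

    ks, vs, outs = [], [], []
    for t in range(x.shape[1]):  # incremental pass with a KV cache
        xt = x[:, t : t + 1]
        ks.append(xt @ wk)
        vs.append(xt @ wv)
        outs.append(attend(xt @ wq, torch.cat(ks, dim=1), torch.cat(vs, dim=1)))

    assert torch.allclose(full, torch.cat(outs, dim=1), atol=1e-6)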
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
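

def _demo_bpe_primitives():
    # Editor's sketch (not part of the original module): the two helpers above in action.
    # bytes_to_unicode maps every possible byte to a distinct printable unicode character:
    table = bytes_to_unicode()
    assert len(table) == 256 and len(set(table.values())) == 256
    # get_pairs lists the adjacent symbol pairs that BPE considers merging:
    assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}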
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
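

def _demo_bert_tokenizer_fast():
    # Editor's sketch (throwaway vocab, not part of the original module): builds a
    # BertTokenizerFast from a tiny WordPiece vocab file (the fast backend is converted
    # from the slow BertTokenizer) and checks the special-token layout.
    import os
    import tempfile

    vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]", "hello", "world"]
    with tempfile.TemporaryDirectory() as tmp:
        vocab_file = os.path.join(tmp, "vocab.txt")
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write("\n".join(vocab))
        tokenizer = BertTokenizerFast(vocab_file, do_lower_case=True)
        ids = tokenizer("hello world")["input_ids"]
        # single sequences are wrapped as [CLS] ... [SEP]
        assert tokenizer.convert_ids_to_tokens(ids) == ["[CLS]", "hello", "world", "[SEP]"]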
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter

import requests


def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all GitHub Actions artifact links from a workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Action artifact from a URL (following the redirect to the real download URL)."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)"""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files"""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors


def reduce_by_error(logs, error_filter=None):
    """Count the occurrence of each error"""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method path"""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test


def reduce_by_model(logs, error_filter=None):
    """Count the occurrence of each error per model"""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
"""
Implementation of the Sigmoid Linear Unit (SiLU), also known as the swish function:
f(x) = x * sigmoid(x).
"""
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
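
    # Editor's example (not in the original): SiLU on a small vector.
    print(sigmoid_linear_unit(np.array([-1.0, 0.0, 1.0])))  # approx. [-0.2689  0.      0.7311]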
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
import json
import os
import re
import unittest

from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
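
    # Editor's note (derived from the toy vocab above): "lower newer" with a prefix space
    # BPE-splits into Ġlow(14) er(15) Ġ(10) n(9) e(3) w(2) er(15); appending <unk> adds id 19,
    # which is why the expected ids are [14, 15, 10, 9, 3, 2, 15, 19].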
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCamelCase = self.get_tokenizer()
lowerCamelCase = self.get_rust_tokenizer(add_prefix_space=__lowerCamelCase )
lowerCamelCase = 'lower newer'
# Testing tokenization
lowerCamelCase = tokenizer.tokenize(__lowerCamelCase , add_prefix_space=__lowerCamelCase )
lowerCamelCase = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
# Testing conversion to ids without special tokens
lowerCamelCase = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
lowerCamelCase = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
# Testing conversion to ids with special tokens
lowerCamelCase = self.get_rust_tokenizer(add_prefix_space=__lowerCamelCase )
lowerCamelCase = tokenizer.encode(__lowerCamelCase , add_prefix_space=__lowerCamelCase )
lowerCamelCase = rust_tokenizer.encode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
# Testing the unknown token
lowerCamelCase = tokens + [rust_tokenizer.unk_token]
lowerCamelCase = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
def lowerCamelCase__ ( self : Tuple , *__snake_case : Dict , **__snake_case : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
pass
    def lowerCamelCase__ ( self : Optional[Any] , max_length=15 ) -> str:
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = 'This is a simple input'
                sa = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                pa = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , sa , max_length=max_length , padding='max_length' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , pa , max_length=max_length , padding='max_length' , )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
        # Simple input
        s = 'This is a simple input'
        sa = ['This is a simple input looooooooong', 'This is a simple input']
        p = ('This is a simple input', 'This is a pair')
        pa = [
            ('This is a simple input loooooong', 'This is a simple input'),
            ('This is a simple pair loooooong', 'This is a simple pair'),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s , padding='max_length' , max_length=30 , return_tensors='np' )
        out_sa = tokenizer(sa , padding=True , truncate=True , return_tensors='np' )
        out_p = tokenizer(*p , padding='max_length' , max_length=60 , return_tensors='np' )
        out_pa = tokenizer(pa , padding=True , truncate=True , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
        bos_token = '$$$'
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=bos_token , add_bos_token=True )
        s = 'This is a simple input'
        sa = ['This is a simple input 1', 'This is a simple input 2']
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s )
        out_sa = tokenizer(sa )
        self.assertEqual(out_s.input_ids[0] , bos_token_id )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        decode_s = tokenizer.decode(out_s.input_ids )
        decode_sa = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , bos_token )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def lowerCamelCase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
        tokenizer = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
        text = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
        expected_truncated_text = '\nif len_a > len_b: result = a\nelse: result = b'
        input_ids = tokenizer.encode(text )
        truncation_pattern = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^\"\"\"', '\n\n\n']
        decoded_text = tokenizer.decode(input_ids , truncate_before_pattern=truncation_pattern )
        self.assertEqual(decoded_text , expected_truncated_text )
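        # NOTE (added): truncate_before_pattern cuts the decoded string at the first
        # regex match -- here a trailing '#' comment line, the EOS token, docstring
        # quotes, or a run of blank lines -- which is how CodeGen-style sampling
        # trims runaway completions back to the last complete statement.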
def lowerCamelCase__ ( self : int ) -> int:
'''simple docstring'''
pass
| 246 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : List[Any] = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    __A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 | 0 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ) -> None:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer( self ):
        return PegasusTokenizer.from_pretrained('google/pegasus-large' )
    def get_tokenizer( self , **kwargs ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id( self ) -> None:
        token = '</s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<pad>' )
        self.assertEqual(vocab_keys[1] , '</s>' )
        self.assertEqual(vocab_keys[-1] , 'v' )
        self.assertEqual(len(vocab_keys ) , 1103 )
    def test_vocab_size( self ) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
    def test_mask_tokens_rust_pegasus( self ) -> None:
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
    def test_large_mask_tokens( self ) -> None:
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
    def test_pegasus_large_tokenizer_settings( self ) -> None:
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = 'To ensure a smooth flow of bank resolutions.'
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
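        # NOTE (added): Pegasus reserves the first `offset` ids for its special and
        # mask tokens (<pad>=0, </s>=1, <mask_1>=2, <mask_2>=3, then the extra unk
        # tokens), so every raw SentencePiece id is shifted up by `offset` when it
        # becomes a model id -- hence unk_token_id == offset + 2 above.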
@require_torch
    def test_large_seq2seq_truncation( self ) -> None:
        src_texts = ['This is going to be way too long.' * 150, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='pt' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='pt' )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
@slow
def _UpperCAmelCase ( self ) -> str:
# fmt: off
UpperCamelCase_ = {'input_ids': [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ) -> None:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB , offset=0 , mask_token_sent=None , mask_token='[MASK]' )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer( self ):
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
    def get_tokenizer( self , **kwargs ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus( self ) -> None:
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
@require_torch
    def test_large_seq2seq_truncation( self ) -> None:
        src_texts = ['This is going to be way too long.' * 1000, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='pt' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='pt' )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer( self ) -> None:
        test_str = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        token_ids = self._large_tokenizer(test_str ).input_ids
        self.assertListEqual(
            token_ids , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
| 23 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : Union[str, Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__A : List[Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict , old , new ):
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys(state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict , is_panoptic=False ):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[F"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[F"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[F"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[F"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[F"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
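        # NOTE (added): with the default hidden size of 256, the fused in_proj matrix
        # has shape (3 * 256, 256) = (768, 256); the three row slices above become the
        # separate query, key and value projections of the HF attention module.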
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name , pytorch_dump_folder_path ):
SCREAMING_SNAKE_CASE = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
SCREAMING_SNAKE_CASE = "resnet101"
if "dc5" in model_name:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = "panoptic" in model_name
if is_panoptic:
SCREAMING_SNAKE_CASE = 250
else:
SCREAMING_SNAKE_CASE = 91
SCREAMING_SNAKE_CASE = "huggingface/label-files"
SCREAMING_SNAKE_CASE = "coco-detection-id2label.json"
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(A__ , A__ , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE = {int(A__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
# load image processor
SCREAMING_SNAKE_CASE = "coco_panoptic" if is_panoptic else "coco_detection"
SCREAMING_SNAKE_CASE = ConditionalDetrImageProcessor(format=A__ )
# prepare image
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=A__ , return_tensors="pt" )
SCREAMING_SNAKE_CASE = encoding["pixel_values"]
logger.info(F"Converting model {model_name}..." )
# load original model from torch hub
SCREAMING_SNAKE_CASE = torch.hub.load("DeppMeng/ConditionalDETR" , A__ , pretrained=A__ ).eval()
SCREAMING_SNAKE_CASE = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
SCREAMING_SNAKE_CASE = "conditional_detr." + src
rename_key(A__ , A__ , A__ )
SCREAMING_SNAKE_CASE = rename_backbone_keys(A__ )
# query, key and value matrices need special treatment
read_in_q_k_v(A__ , is_panoptic=A__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
SCREAMING_SNAKE_CASE = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE = val
# finally, create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE = ConditionalDetrForSegmentation(A__ ) if is_panoptic else ConditionalDetrForObjectDetection(A__ )
model.load_state_dict(A__ )
model.eval()
model.push_to_hub(repo_id=A__ , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
SCREAMING_SNAKE_CASE = conditional_detr(A__ )
SCREAMING_SNAKE_CASE = model(A__ )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
image_processor.save_pretrained(A__ )
if __name__ == "__main__":
__A : str = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__A : int = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 16 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCamelCase_ ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = ShapEPipeline
    params = ['''prompt''']
    batch_params = ['''prompt''']
    required_optional_params = [
        '''num_images_per_prompt''',
        '''num_inference_steps''',
        '''generator''',
        '''latents''',
        '''guidance_scale''',
        '''frame_size''',
        '''output_type''',
        '''return_dict''',
    ]
_lowercase : Optional[Any] = False
@property
def lowerCAmelCase_ ( self : List[str] ):
return 32
@property
def lowerCAmelCase_ ( self : Optional[int] ):
return 32
@property
def lowerCAmelCase_ ( self : Dict ):
return self.time_input_dim * 4
@property
def lowerCAmelCase_ ( self : str ):
return 8
@property
def lowerCAmelCase_ ( self : Optional[int] ):
__A : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def lowerCAmelCase_ ( self : str ):
torch.manual_seed(0 )
__A : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__A )
@property
def lowerCAmelCase_ ( self : List[Any] ):
torch.manual_seed(0 )
__A : Optional[int] = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
__A : List[str] = PriorTransformer(**__A )
return model
@property
def lowerCAmelCase_ ( self : Dict ):
torch.manual_seed(0 )
__A : Any = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
__A : Tuple = ShapERenderer(**__A )
return model
def lowerCAmelCase_ ( self : Optional[int] ):
__A : Union[str, Any] = self.dummy_prior
__A : List[Any] = self.dummy_text_encoder
__A : Union[str, Any] = self.dummy_tokenizer
__A : Tuple = self.dummy_renderer
__A : Any = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=__A , clip_sample=__A , clip_sample_range=1.0 , )
__A : Tuple = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def lowerCAmelCase_ ( self : List[Any] , __A : str , __A : Any=0 ):
if str(__A ).startswith("""mps""" ):
__A : int = torch.manual_seed(__A )
else:
__A : List[Any] = torch.Generator(device=__A ).manual_seed(__A )
__A : Optional[int] = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase_ ( self : Optional[int] ):
__A : str = """cpu"""
__A : Tuple = self.get_dummy_components()
__A : Optional[Any] = self.pipeline_class(**__A )
__A : List[str] = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
__A : Tuple = pipe(**self.get_dummy_inputs(__A ) )
__A : Tuple = output.images[0]
__A : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__A : List[Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase_ ( self : Dict ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCAmelCase_ ( self : Dict ):
__A : List[str] = torch_device == """cpu"""
__A : str = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__A , relax_max_difference=__A , )
def lowerCAmelCase_ ( self : Dict ):
__A : List[Any] = self.get_dummy_components()
__A : List[str] = self.pipeline_class(**__A )
__A : Any = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
__A : List[Any] = 1
__A : int = 2
__A : int = self.get_dummy_inputs(__A )
for key in inputs.keys():
if key in self.batch_params:
__A : Optional[Any] = batch_size * [inputs[key]]
__A : Tuple = pipe(**__A , num_images_per_prompt=__A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : int ):
__A : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
__A : Union[str, Any] = ShapEPipeline.from_pretrained("""openai/shap-e""" )
__A : Any = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
__A : int = torch.Generator(device=__A ).manual_seed(0 )
__A : str = pipe(
"""a shark""" , generator=__A , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__A , __A )
| 17 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(function: str ,starting_point: complex ,variable: str = "x" ,precision: float = 10**-10 ,multiplicity: int = 1 ,) -> complex:
    x = symbols(variable )
    func = lambdify(x ,function )
    diff_function = lambdify(x ,diff(function ,x ) )
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError("""Could not find root""" ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        prev_guess = next_guess
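# NOTE (added): the update above is the modified Newton-Raphson step
#   x_{n+1} = x_n - m * f(x_n) / f'(x_n)
# where m is the root's multiplicity; m = 1 recovers the classic method, while
# m > 1 restores quadratic convergence on repeated roots.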
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 17 | 1 |
def get_highest_set_bit_position(number: int ) -> int:
    if not isinstance(number ,int ):
        raise TypeError("""Input value must be an 'int' type""" )
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
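# Worked examples (added): the loop counts how many right-shifts empty the number,
# i.e. the 1-indexed position of the highest set bit:
# get_highest_set_bit_position(1) -> 1, get_highest_set_bit_position(8) -> 4,
# get_highest_set_bit_position(0) -> 0 (the while loop never runs).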
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 |
from math import sqrt
def solution(limit: int = 1000000 ) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size ,sum_shortest_sides // 2 )
                    - max(1 ,sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
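# NOTE (added): for a cuboid with sides a <= b <= c, the shortest surface path is
# sqrt((a + b)**2 + c**2); e.g. the classic 6 x 5 x 3 cuboid gives
# sqrt((5 + 3)**2 + 6**2) = sqrt(100) = 10. The tally above counts, per candidate
# longest side, the (a, b) pairs whose path length is an integer.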
if __name__ == "__main__":
print(f"""{solution() = }""")
| 17 | 1 |
def snake_to_camel_case(input_str: str ,use_pascal: bool = False ) -> str:
    if not isinstance(input_str ,str ):
        msg = f"""Expected string as input, found {type(input_str )}"""
        raise ValueError(msg )
    if not isinstance(use_pascal ,bool ):
        msg = f"""Expected boolean as use_pascal parameter, found {type(use_pascal )}"""
        raise ValueError(msg )
    words = input_str.split("""_""" )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = """""" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 17 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase_ ( BaseImageProcessor ):
    model_input_names = ['''pixel_values''']
def __init__( self : Dict , __A : bool = True , __A : Optional[Dict[str, int]] = None , __A : PILImageResampling = PILImageResampling.BILINEAR , __A : bool = True , __A : Dict[str, int] = None , __A : bool = True , __A : Union[int, float] = 1 / 255 , __A : bool = True , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , **__A : int , ):
super().__init__(**__A )
__A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 256}
__A : Dict = get_size_dict(__A , default_to_square=__A )
__A : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__A : int = get_size_dict(__A , param_name="""crop_size""" )
__A : str = do_resize
__A : Dict = size
__A : Any = resample
__A : Optional[Any] = do_center_crop
__A : List[str] = crop_size
__A : Optional[int] = do_rescale
__A : int = rescale_factor
__A : Union[str, Any] = do_normalize
__A : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__A : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase_ ( self : Optional[Any] , __A : np.ndarray , __A : Dict[str, int] , __A : PILImageResampling = PILImageResampling.BICUBIC , __A : Optional[Union[str, ChannelDimension]] = None , **__A : List[Any] , ):
__A : str = get_size_dict(__A , default_to_square=__A )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__A : Dict = get_resize_output_image_size(__A , size=size["""shortest_edge"""] , default_to_square=__A )
return resize(__A , size=__A , resample=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : Tuple , __A : np.ndarray , __A : Dict[str, int] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : str , ):
__A : str = get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(__A , size=(size["""height"""], size["""width"""]) , data_format=__A , **__A )
def lowerCAmelCase_ ( self : List[str] , __A : np.ndarray , __A : float , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Optional[int] ):
return rescale(__A , scale=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : Any , __A : np.ndarray , __A : Union[float, List[float]] , __A : Union[float, List[float]] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Tuple , ):
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : int , __A : ImageInput , __A : Optional[bool] = None , __A : Dict[str, int] = None , __A : PILImageResampling = None , __A : bool = None , __A : Dict[str, int] = None , __A : Optional[bool] = None , __A : Optional[float] = None , __A : Optional[bool] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[str, TensorType]] = None , __A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__A : Optional[int] , ):
__A : List[str] = do_resize if do_resize is not None else self.do_resize
__A : Any = size if size is not None else self.size
__A : Union[str, Any] = get_size_dict(__A , default_to_square=__A )
__A : Tuple = resample if resample is not None else self.resample
__A : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
__A : List[Any] = crop_size if crop_size is not None else self.crop_size
__A : int = get_size_dict(__A , param_name="""crop_size""" )
__A : Tuple = do_rescale if do_rescale is not None else self.do_rescale
__A : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__A : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
__A : Optional[int] = image_mean if image_mean is not None else self.image_mean
__A : List[str] = image_std if image_std is not None else self.image_std
__A : Union[str, Any] = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__A : Union[str, Any] = [to_numpy_array(__A ) for image in images]
if do_resize:
__A : int = [self.resize(image=__A , size=__A , resample=__A ) for image in images]
if do_center_crop:
__A : Optional[Any] = [self.center_crop(image=__A , size=__A ) for image in images]
if do_rescale:
__A : List[Any] = [self.rescale(image=__A , scale=__A ) for image in images]
if do_normalize:
__A : Any = [self.normalize(image=__A , mean=__A , std=__A ) for image in images]
__A : int = [to_channel_dimension_format(__A , __A ) for image in images]
__A : Tuple = {"""pixel_values""": images}
return BatchFeature(data=__A , tensor_type=__A )
def lowerCAmelCase_ ( self : int , __A : List[str] , __A : List[Tuple] = None ):
__A : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__A ) != len(__A ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(__A ):
__A : str = target_sizes.numpy()
__A : int = []
for idx in range(len(__A ) ):
__A : Any = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=__A )
__A : Union[str, Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__A )
else:
__A : List[str] = logits.argmax(dim=1 )
__A : Tuple = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
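# A minimal usage sketch (added). The processor follows the standard HF image-processor
# API, so -- assuming a PIL image `pil_image` -- a call would look like:
#   processor = <ThisImageProcessor>(size={"shortest_edge": 256})
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape  # (1, 3, 224, 224) after the 224x224 center crop
# The concrete class name is left out because it is not recoverable from this file.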
| 17 | 1 |
def selection_sort(collection: list ) -> list:
    length = len(collection )
    for i in range(length - 1 ):
        least = i
        for k in range(i + 1 ,length ):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least] , collection[i] = (collection[i], collection[least])
    return collection
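# Worked example (added): selection_sort([3, 1, 2]) mutates the list in place and
# returns [1, 2, 3]; each pass swaps the minimum of the unsorted suffix into position.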
if __name__ == "__main__":
UpperCAmelCase_ : Any = input('''Enter numbers separated by a comma:\n''').strip()
UpperCAmelCase_ : Optional[Any] = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
| 17 |
class Things :
    def __init__( self , name , value , weight ):
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__( self ):
        return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
    def get_value( self ):
        return self.value
    def get_name( self ):
        return self.name
    def get_weight( self ):
        return self.weight
    def value_weight( self ):
        return self.value / self.weight
def build_menu(name ,value ,weight ):
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] ,value[i] ,weight[i] ) )
    return menu
def greedy(item ,max_cost ,key_func ):
    items_copy = sorted(item ,key=key_func ,reverse=True )
    result = []
    total_value , total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
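# Usage sketch (added), using the names restored above:
#   foods = build_menu(["Burger", "Pizza", "Cola"], [80, 100, 60], [40, 60, 20])
#   taken, total_value = greedy(foods, 60, Things.get_value)
# greedy() walks the items in descending key_func order, packing each one whose
# weight still fits the max_cost budget.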
def test_greedy() -> None:
    # placeholder test hook (name assumed; the original body was empty)
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 17 |
UpperCAmelCase_ : dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_5818,
}
def energy_conversion(from_type: str ,to_type: str ,value: float ) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            f"""Valid values are: {", ".join(ENERGY_CONVERSION )}"""
        )
        raise ValueError(msg )
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
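# Worked examples (added): energy_conversion("joule", "kilojoule", 1) -> 0.001 and
# energy_conversion("kilowatthour", "joule", 1) -> 3_600_000.0; the value is first
# scaled to joules, then divided by the target unit's joule equivalent.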
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 1 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class lowerCamelCase_ :
pass
| 17 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase_ : Optional[Any] = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 | 1 |
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int] ,burst_time: list[int] ,no_of_processes: int ) -> list[int]:
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    increment_time = 0
    complete = 0
    minm = 999999999
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes ):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            waiting_time[short] = finish_time - arrival_time[short] - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int] ,no_of_processes: int ,waiting_time: list[int] ) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time: list[int] ,turn_around_time: list[int] ,no_of_processes: int ) -> None:
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes ):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
    print("""Average turn around time =""" ,total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('''Enter how many process you want to analyze''')
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    fcfs = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'''Process''',
'''BurstTime''',
'''ArrivalTime''',
'''WaitingTime''',
'''TurnAroundTime''',
],
)
# Printing the dataFrame
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs)
| 17 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
UpperCAmelCase_ : Optional[Any] = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def list_datasets_on_hf_gcp_parameters(with_config=True ):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class TestDatasetOnHfGcp ( TestCase ):
    dataset = None
    config_name = None
    def test_dataset_info_available( self , dataset , config_name ):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path , dataset=True )
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            dataset_info_url = """/""".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , """/""" ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory ):
    tmp_dir = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
    dataset_module = dataset_module_factory("""wikipedia""" ,cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,)
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path ):
    dataset_module = dataset_module_factory("""wikipedia""" ,cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path ,dataset=True )
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,)
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds ,IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds["""train"""] ,IterableDataset )
    assert next(iter(ds["""train"""] ) )
| 17 | 1 |
from __future__ import annotations
from typing import Any
class ContainsLoopError ( Exception ):
    pass
class Node :
    def __init__( self , data: Any ) -> None:
        self.data = data
        self.next_node: Node | None = None
    def __iter__( self ):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node )
            yield node.data
            node = node.next_node
    @property
    def has_loop( self ) -> bool:
        try:
            list(self )
            return False
        except ContainsLoopError:
            return True
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = Node(1)
UpperCAmelCase_ : List[Any] = Node(2)
UpperCAmelCase_ : Optional[int] = Node(3)
UpperCAmelCase_ : List[Any] = Node(4)
print(root_node.has_loop) # False
UpperCAmelCase_ : str = root_node.next_node
print(root_node.has_loop) # True
UpperCAmelCase_ : int = Node(5)
UpperCAmelCase_ : Dict = Node(6)
UpperCAmelCase_ : Any = Node(5)
UpperCAmelCase_ : Union[str, Any] = Node(6)
print(root_node.has_loop) # False
UpperCAmelCase_ : Union[str, Any] = Node(1)
print(root_node.has_loop) # False
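
# --- illustrative addition (not part of the original module) ---
# The visited-list check above needs O(n) extra memory. Floyd's
# tortoise-and-hare algorithm detects a cycle in O(1) space;
# `floyd_has_loop` is a hypothetical helper sketched for comparison.
def floyd_has_loop(head: Node | None) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # advance by one
        fast = fast.next_node.next_node  # advance by two
        if slow is fast:  # the pointers can only meet on a cycle
            return True
    return False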
| 17 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # compute the height/width the processor is expected to return for these inputs
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 17 | 1 |
def catalan(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
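    # --- illustrative usage (not part of the original module) ---
    # catalan(n) returns the (n-1)-th Catalan number: 1, 1, 2, 5, 14, ...
    assert [catalan(n) for n in range(1, 6)] == [1, 1, 2, 5, 14]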
| 17 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
| 17 | 1 |
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
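    # --- illustrative usage (not part of the original module) ---
    # slowsort sorts in place; it is a deliberately inefficient
    # "multiply and surrender" teaching algorithm, so keep inputs tiny.
    data = [9, 3, 7, 1, 5]
    slowsort(data)
    assert data == [1, 3, 5, 7, 9]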
| 17 |
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
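    # --- illustrative check (not part of the original module) ---
    # Horner evaluates c0 + x*(c1 + x*(c2 + ...)) with n multiplies,
    # versus the many repeated powers computed by evaluate_poly above;
    # both must agree on the same input.
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-9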
| 17 | 1 |
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
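    # --- illustrative usage (not part of the original module) ---
    # no subset of even numbers sums to 5, but 2 + 4 + 8 == 14
    assert is_sum_subset([2, 4, 6, 8], 5) is False
    assert is_sum_subset([2, 4, 6, 8], 14) is True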
| 17 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values)
        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)
        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
| 17 | 1 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    flax_params_dict = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params_dict)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}
    CONVERSION_MAPPING = {
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
    DECODER_CONVERSION_MAPPING = {
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
            converted_dict[new_key] = flax_dict[key]
    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False):
    flax_params = get_flax_param(t5x_checkpoint_path)
    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18)
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa)
    model = Pix2StructForConditionalGeneration(config)
    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)
    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)
    if use_large:
        # settings applied for the large checkpoint (the original script's exact flags may differ)
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''')
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 17 |
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
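    # --- illustrative usage (not part of the original module) ---
    # for non-negative integers this matches int.bit_length()
    assert get_highest_set_bit_position(25) == 5  # 0b11001
    assert get_highest_set_bit_position(25) == (25).bit_length()
    assert get_highest_set_bit_position(0) == 0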
| 17 | 1 |
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
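    # --- illustrative usage (not part of the original module) ---
    # two parties independently derive the same shared secret
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    alice_shared = alice.generate_shared_key(bob.generate_public_key())
    bob_shared = bob.generate_shared_key(alice.generate_public_key())
    assert alice_shared == bob_shared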
| 17 |
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
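    # --- illustrative check (not part of the original module) ---
    # Project Euler 191 states there are 43 prize strings over a 4-day period
    assert solution(4) == 43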
| 17 | 1 |
import numpy as np
class Cell:
    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            # heuristic: squared Euclidean distance to the goal
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 17 |
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]
        if len(sources) == 0 or len(sinks) == 0:
            return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])
            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
                self.source_index = 0
            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
                self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)
            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 17 | 1 |
import torch
from diffusers import DiffusionPipeline
class CustomPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample
        # returns a tensor of ones with the same shape as the scheduler output
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
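
# --- illustrative usage (not part of the original pipeline file) ---
# The tiny UNet config below mirrors the dummy model used in diffusers'
# own tests; the exact sizes here are an assumption for demonstration.
if __name__ == "__main__":
    from diffusers import DDPMScheduler, UNet2DModel

    unet = UNet2DModel(
        block_out_channels=(32, 64),
        layers_per_block=2,
        sample_size=32,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    pipeline = CustomPipeline(unet=unet, scheduler=DDPMScheduler())
    print(pipeline().shape)  # torch.Size([1, 3, 32, 32]) -- a tensor of ones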
| 17 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 17 | 1 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        # do not wait on the remote-code confirmation prompt in tests
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)
        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)
    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
def lowerCAmelCase_ ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(__A , """vocab.txt""" ) )
__A : List[str] = AutoTokenizer.from_pretrained(__A , tokenizer_type="""bert""" , use_fast=__A )
self.assertIsInstance(__A , __A )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(__A , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(__A , """merges.txt""" ) )
__A : int = AutoTokenizer.from_pretrained(__A , tokenizer_type="""gpt2""" , use_fast=__A )
self.assertIsInstance(__A , __A )
@require_tokenizers
def lowerCAmelCase_ ( self : Dict ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(__A , """vocab.txt""" ) )
__A : List[Any] = AutoTokenizer.from_pretrained(__A , tokenizer_type="""bert""" )
self.assertIsInstance(__A , __A )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(__A , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(__A , """merges.txt""" ) )
__A : int = AutoTokenizer.from_pretrained(__A , tokenizer_type="""gpt2""" )
self.assertIsInstance(__A , __A )
def lowerCAmelCase_ ( self : List[Any] ):
with pytest.raises(__A ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)
            self.assertEqual(tokenizer.model_max_length, 512)
@require_tokenizers
def lowerCAmelCase_ ( self : Tuple ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__A , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
__A : Union[str, Any] = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def lowerCAmelCase_ ( self : List[str] ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []
        for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__A )
@require_tokenizers
def lowerCAmelCase_ ( self : Union[str, Any] ):
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=__A ) , __A )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , __A )
@require_tokenizers
def lowerCAmelCase_ ( self : Any ):
__A : List[str] = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=__A )
__A : Dict = """Hello, world. How are you?"""
__A : List[Any] = tokenizer.tokenize(__A )
self.assertEqual("""[UNK]""" , tokens[0] )
__A : Dict = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=__A )
__A : str = tokenizer.tokenize(__A )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def lowerCAmelCase_ ( self : str ):
__A : Optional[Any] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(__A ) , __A )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 3_0000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def lowerCAmelCase_ ( self : Optional[int] ):
__A : Dict = AutoTokenizer.from_pretrained(__A )
self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__A )
__A : int = AutoTokenizer.from_pretrained(__A )
self.assertIsInstance(__A , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def lowerCAmelCase_ ( self : Dict ):
__A : List[Any] = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__A , __A )
def lowerCAmelCase_ ( self : Any ):
# Check we can load the tokenizer config of an online model.
__A : List[Any] = get_tokenizer_config("""bert-base-cased""" )
__A : int = config.pop("""_commit_hash""" , __A )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__A , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
__A : Optional[Any] = get_tokenizer_config(__A )
self.assertDictEqual(__A , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__A : str = AutoTokenizer.from_pretrained(__A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__A )
__A : List[Any] = get_tokenizer_config(__A )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def lowerCAmelCase_ ( self : List[str] ):
try:
AutoConfig.register("""custom""" , __A )
AutoTokenizer.register(__A , slow_tokenizer_class=__A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoTokenizer.register(__A , slow_tokenizer_class=__A )
__A : Union[str, Any] = CustomTokenizer.from_pretrained(__A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__A )
__A : Union[str, Any] = AutoTokenizer.from_pretrained(__A )
self.assertIsInstance(__A , __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCAmelCase_ ( self : str ):
try:
AutoConfig.register("""custom""" , __A )
# Can register in two steps
AutoTokenizer.register(__A , slow_tokenizer_class=__A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__A , fast_tokenizer_class=__A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__A , slow_tokenizer_class=__A , fast_tokenizer_class=__A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoTokenizer.register(__A , fast_tokenizer_class=__A )
        # We pass through a BertTokenizerFast here because there is no slow-to-fast converter
        # for our new tokenizer, and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__A : List[str] = BertTokenizerFast.from_pretrained(__A )
bert_tokenizer.save_pretrained(__A )
__A : List[str] = CustomTokenizerFast.from_pretrained(__A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__A )
__A : Tuple = AutoTokenizer.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__A : List[str] = AutoTokenizer.from_pretrained(__A , use_fast=__A )
self.assertIsInstance(__A , __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
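# For reference, a minimal self-contained sketch of the registration pattern the
# tests above exercise, outside the test harness. `MyConfig` and `MyTokenizer`
# are hypothetical stand-ins, not classes defined in this file.
from transformers import AutoConfig, AutoTokenizer, BertTokenizer, PretrainedConfig


class MyConfig(PretrainedConfig):
    model_type = "my-model"  # hypothetical model type key


class MyTokenizer(BertTokenizer):
    pass  # stand-in slow tokenizer


# Map the model-type string to the config class, then the config class to the tokenizer.
AutoConfig.register("my-model", MyConfig)
AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)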
from __future__ import annotations
def ceil_index(v: list[int], l: int, r: int, key: int) -> int:  # noqa: E741
    """Smallest index in the sorted slice v[l..r] whose value is >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Length of the longest strictly increasing subsequence of v, in O(n log n).

    >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
    6
    >>> longest_increasing_subsequence_length([])
    0
    """
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # New smallest value: it starts a fresh candidate subsequence.
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far.
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces its ceiling in tail to keep tail values as small as possible.
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
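    # Example run (input values are illustrative): 10, 22, 33, 50, 60 is one
    # longest strictly increasing subsequence, so the expected length is 5.
    print(longest_increasing_subsequence_length([10, 22, 9, 33, 21, 50, 41, 60]))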
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
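# A minimal inference sketch mirroring the integration tests above. It downloads
# the checkpoint, so treat it as illustrative; the token ids are made up.
#
#     import numpy as np
#     from transformers import FlaxRobertaPreLayerNormForMaskedLM
#
#     model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained(
#         "andreasmadsen/efficient_mlm_m0.40", from_pt=True
#     )
#     input_ids = np.array([[0, 31414, 232, 2]], dtype="i4")
#     logits = model(input_ids).logits  # shape (1, 4, 50265)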
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
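# A migration sketch following the warning's advice; the checkpoint name is
# illustrative, not mandated by this script.
#
#     from diffusers import StableDiffusionInpaintPipeline
#
#     pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
#     # pipe(prompt=..., image=..., mask_image=...) then runs inpainting as before.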
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
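# These checks only do something meaningful under a distributed launcher. A
# minimal launch sketch, assuming this file is saved as test_ops.py and the
# snippet below lives in a separate driver script:
#
#     from accelerate import notebook_launcher
#     from test_ops import main
#
#     # Spawn two processes and run the checks in each.
#     notebook_launcher(main, args=(), num_processes=2)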
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
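# Usage sketch: with the default strides above, each output logit covers
# 5 * 2 * 2 * 2 * 2 * 2 * 2 = 320 input audio samples.
#
#     from transformers import UniSpeechConfig
#
#     config = UniSpeechConfig()  # the defaults shown above
#     assert config.inputs_to_logits_ratio == 320  # product of conv_stride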
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    """Log commit info of the current repo into folder_path/git_log.json."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """Handle single- and multi-GPU / multi-node setups."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def set_seed(args):
    """Set the random seed for numpy and torch."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
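# A small usage sketch; SimpleNamespace stands in for the argparse namespace
# these helpers normally receive, and git_log assumes the script runs inside a
# git checkout with the target directory already created.
#
#     from types import SimpleNamespace
#
#     args = SimpleNamespace(seed=42, n_gpu=0)
#     set_seed(args)         # deterministic numpy / torch RNGs
#     git_log("./dump_dir")  # writes ./dump_dir/git_log.json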
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Extra kwargs are passed through to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
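# Usage sketch; the file names and script name are placeholders:
#
#     metrics = calculate_rouge_path("preds.txt", "refs.txt", save_path="rouge.json")
#     print(metrics)
#
# or from the shell via fire, assuming the file is saved as rouge_cli.py:
#
#     python rouge_cli.py preds.txt refs.txt --save_path=rouge.json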
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
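# With the lazy module wired up, the classes resolve on first access. A usage
# sketch (the checkpoint name is illustrative):
#
#     from transformers import InstructBlipForConditionalGeneration, InstructBlipProcessor
#
#     processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
#     model = InstructBlipForConditionalGeneration.from_pretrained("Salesforce/instructblip-vicuna-7b")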
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
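# Invocation sketch; the script name and all paths are placeholders:
#
#     python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path mobilebert/mobilebert_model.ckpt \
#         --mobilebert_config_file mobilebert/config.json \
#         --pytorch_dump_path pytorch_model.bin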