| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string, 82–54.1k chars | int64, 0–699 | string, 111–35.6k chars | int64, 0–699 | int64, 0–1 |
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral string to an integer, e.g. "XIV" -> 14."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # A smaller value before a larger one means subtraction (e.g. "IV" == 4).
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a Roman numeral string, e.g. 14 -> "XIV"."""
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

| 32 |
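A quick usage sketch for the Roman numeral converters above (the names roman_to_int and int_to_roman are this edit's reconstruction of the obfuscated originals):

assert roman_to_int("MMXXIV") == 2024
assert int_to_roman(2024) == "MMXXIV"
assert int_to_roman(roman_to_int("XLII")) == "XLII"  # round-trip check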
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(self, vocab_size=42384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, scale_embedding=True, use_cache=True, layerdrop=0.0, activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

| 32 | 1 |
import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    ```
    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    ```

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            # skip spans that overlap an already chosen span
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]

| 32 |
from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    """Mark the function with the key code so it can be handled in the register."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """Mark the function with the key codes so it can be handled in the register."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the matching handler for a key press, if one exists."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())

| 32 | 1 |
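A minimal sketch of how the decorators and metaclass compose, assuming KEYMAP maps key names to the integer codes produced by get_character (an assumption about keymap.py, which is not shown here):

@register
class Menu:
    @mark(KEYMAP["up"])
    def move_up(cls):
        return "up"

# An instance's handle_input() reads one key press and dispatches to the
# matching @mark-decorated method, or returns None for unhandled keys.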
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)
        # create an imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.strip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples)
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples)

| 32 |
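A hedged usage sketch for the pipeline above; facebook/DiT-XL-2-256 is a public DiT checkpoint and "white shark" an ImageNet label:

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
class_ids = pipe.get_label_ids(["white shark"])
image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]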
import unittest

from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        LiltForQuestionAnswering,
        LiltForSequenceClassification,
        LiltForTokenClassification,
        LiltModel,
    )
    from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST


class LiltModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal (x0 <= x1 and y0 <= y1)
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))

| 32 | 1 |
import copy
import re


class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        # prefer the shortest unused prefix of the word
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    i += 1  # advance the counter so a collision cannot loop forever
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            p_k = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[p_k] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters

| 32 |
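A brief usage sketch, assuming a subclass supplies PREFIX and DEFAULTS:

class MyNamer(TrialShortNamer):
    PREFIX = "hp"
    DEFAULTS = {"learning_rate": 1e-3, "batch_size": 8}

name = MyNamer.shortname({"learning_rate": 1e-4, "batch_size": 8})  # "hp_lr0.0001"
params = MyNamer.parse_repr(name)  # round-trips to {"learning_rate": 0.0001, "batch_size": 8}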
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(self, vocab_size=50277, context_length=1024, hidden_size=4096, num_hidden_layers=32, attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5, bos_token_id=0, eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )

| 32 | 1 |
import doctest
from collections import deque

import numpy as np


class CircularConvolution:
    def __init__(self):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self):
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        # each row of the matrix is the second signal rotated by the row index
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()

| 32 |
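A usage sketch with the class's built-in default signals:

convolver = CircularConvolution()
print(convolver.circular_convolution())  # [10, 10, 6, 14]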
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()

| 32 | 1 |
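Two quick checks for binary_and (the name is reconstructed from the obfuscated original):

assert binary_and(25, 32) == "0b000000"
assert binary_and(37, 50) == "0b100000"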
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

| 32 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(self, vocab_size=65024, hidden_size=4544, num_hidden_layers=32, num_attention_heads=71, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, hidden_dropout=0.0, attention_dropout=0.0, num_kv_heads=None, alibi=False, new_decoder_architecture=False, multi_query=True, parallel_attn=True, bias=False, bos_token_id=11, eos_token_id=11, **kwargs):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi

| 32 | 1 |
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")

| 32 |
from math import sqrt


def is_prime(number: int) -> bool:
    """Check primality in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    count = 0
    number = 1
    # handle 2 separately, then step through odd candidates only
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")

| 32 | 1 |
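Small sanity checks for the reconstructed functions above:

assert is_prime(13) and not is_prime(15)
assert solution(6) == 13  # the sixth prime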
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa


class CursorInfo(ctypes.Structure):
    # _fields is a specific attr expected by ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager that hides the terminal cursor and restores it on exit."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()

| 32 | 1 |
def is_automorphic_number(number: int) -> bool:
    """An automorphic number is one whose square ends in the number itself, e.g. 5 -> 25, 76 -> 5776."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()

| 32 | 1 |
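Two examples for is_automorphic_number:

assert is_automorphic_number(76)  # 76 ** 2 == 5776, which ends in 76
assert not is_automorphic_number(7)  # 7 ** 2 == 49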
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}


class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, vocab_size=256008, max_position_embeddings=2048, d_model=1024, ffn_dim=4096, num_layers=24, attention_heads=16, activation_function="gelu", dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, layerdrop=0.0, init_std=0.02, scale_embedding=True, use_cache=True, decoder_start_token_id=2, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

| 32 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}

| 32 | 1 |
def multiplicative_persistence(num: int) -> int:
    """Count how many times the digits of num must be multiplied together before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Count how many times the digits of num must be summed before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()

| 32 |
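Worked examples for the two persistence functions:

assert multiplicative_persistence(39) == 3  # 39 -> 27 -> 14 -> 4
assert additive_persistence(199) == 3  # 199 -> 19 -> 10 -> 1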
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"vocab_file": "spiece.model"}
UpperCAmelCase_ = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
UpperCAmelCase_ = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
UpperCAmelCase_ = "▁"
class __UpperCamelCase ( A__ ):
__A : Any = VOCAB_FILES_NAMES
__A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Tuple = ["""input_ids""", """attention_mask"""]
def __init__( self , _UpperCamelCase , _UpperCamelCase="</s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase=100 , _UpperCamelCase=None , _UpperCamelCase = None , _UpperCamelCase=True , **_UpperCamelCase , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_UpperCAmelCase = [f'''<extra_id_{i}>''' for i in range(_UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_UpperCAmelCase = len(set(filter(lambda _UpperCamelCase : bool('''extra_id''' in str(_UpperCamelCase ) ) , _UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
if legacy:
logger.warning_once(
f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
_UpperCAmelCase = legacy
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , extra_ids=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=_UpperCamelCase , **_UpperCamelCase , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = extra_ids
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCamelCase )
@staticmethod
def UpperCamelCase( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_UpperCAmelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _UpperCamelCase , )
return max_model_length
@property
def UpperCamelCase( self ):
return self.sp_model.get_piece_size() + self._extra_ids
def UpperCamelCase( self ):
_UpperCAmelCase = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(_UpperCamelCase )) + [1]
return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
def UpperCamelCase( self ):
return list(
set(filter(lambda _UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , _UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) )
def UpperCamelCase( self ):
return [self._convert_token_to_id(_UpperCamelCase ) for token in self.get_sentinel_tokens()]
def UpperCamelCase( self , _UpperCamelCase ):
if len(_UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text, **kwargs):
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE , ''' ''')
        return super().tokenize(text , **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text , out_type=str)
        if not self.legacy and not is_first and not text.startswith(''' ''') and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        if token.startswith('''<extra_id_'''):
            match = re.match(R'''<extra_id_(\d+)>''' , token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)
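    # Illustrative mapping (numbers assume the released T5 vocab of 32 000
    # sentencepiece pieces plus 100 extra ids): "<extra_id_0>" -> 32 099 and
    # "<extra_id_99>" -> 32 000, i.e. the sentinels occupy the top of the id
    # space in reverse order.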
    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f'''<extra_id_{self.vocab_size - 1 - index}>'''
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file , '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
return (out_vocab_file,) | 32 | 1 |
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    """simple docstring"""
    with open(input_json_file , encoding='''utf-8''' ) as f:
        results = json.load(f)
    output_md = ['''<details>''', '''<summary>Show updated benchmarks!</summary>''', ''' ''']
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split('''/''' )[-1]
        output_md.append(F'''### Benchmark: {benchmark_file_name}''')
        title = '''| metric |'''
        lines = '''|--------|'''
        value = '''| new / old (diff) |'''
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals['''new''']
            old_val = metric_vals.get('''old''' , None)
            dif_val = metric_vals.get('''diff''' , None)
            val_str = F''' {new_val:f}''' if isinstance(new_val , (int, float)) else '''None'''
            if old_val is not None:
                val_str += F''' / {old_val:f}''' if isinstance(old_val , (int, float)) else "None"
            if dif_val is not None:
                val_str += F''' ({dif_val:f})''' if isinstance(dif_val , (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append('''</details>''')
    with open(output_md_file , '''w''' , encoding='''utf-8''' ) as f:
        f.writelines('''\n'''.join(output_md))
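# Output shape (a sketch): each benchmark becomes a section like
#   ### Benchmark: benchmark_array_xd.json
#   | metric | read_batched |
#   |--------|---|
#   | new / old (diff) | 0.123456 / 0.120000 (0.003456) |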
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file) | 32 |
from __future__ import annotations
def is_9_pandigital(number: int) -> bool:
    """simple docstring"""
    digits = str(number)
    return len(digits) == 9 and set(digits) == set('''123456789''')


def solution() -> int | None:
    """simple docstring"""
    for base_num in range(99_99 , 49_99 , -1):
        candidate = 10_00_02 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(3_33 , 99 , -1):
        candidate = 1_00_20_03 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
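# Why the multipliers work (a sketch): for a 4-digit n whose double has 5
# digits, n * 10_00_02 == n * 100_000 + 2 * n, i.e. the digits of n followed by
# the digits of 2n -- the concatenated product of n with (1, 2). The second
# loop covers 3-digit bases with multiplier (1, 2, 3), since
# n * 1_00_20_03 == n * 10**6 + 2n * 10**3 + 3n. For instance,
# 9327 * 100002 == 932718654, the concatenation of 9327 and 18654 (= 2 * 9327).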
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 1 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """simple docstring"""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
    return flax_key_tuple, flax_tensor
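# Sketch of the convention: a Flax ("mlp", "wi", "kernel") expert weight of
# shape (experts, d_in, d_out) becomes a PyTorch ("mlp", "wi", "weight") tensor
# permuted to (experts, d_out, d_in); plain 2-D kernels are simply transposed.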
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    """simple docstring"""
    if "metadata" in layer:
        split_layer = layer.split('''metadata''')
        curr_real_layer_name = ''''''.join(split_layer[0])[:-1]
        split_layer = [tuple(('''metadata''' + split_layer[1]).split('''/'''))]
    elif "kvstore" in layer:
        split_layer = layer.split('''kvstore''')
        curr_real_layer_name = ''''''.join(split_layer[0])[:-1]
        split_layer = [tuple(('''kvstore''' + split_layer[1]).split('''/'''))]
    else:
        split_layer = layer.split('''/''')
        curr_real_layer_name = '''/'''.join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
    elif "kvstore/driver" in layer:
        content = '''file'''
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    """simple docstring"""
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace('''/''' , '''.''')] = v
    current_block = new_current_block
    torch.save(current_block , save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    """simple docstring"""
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path , exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())['''optimizer''']['''target''']
        checkpoint_info = flatten_dict(checkpoint_info , sep='''/''')

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer , checkpoint_info , switch_checkpoint_path)
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split('''/''')) , raw_weights)
        key = '''/'''.join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path , weights_name.replace('''.bin''' , F'''-{len(sharded_state_dicts)+1:05d}-of-???.bin'''))
            rename_and_save_block(current_block , save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch , dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace('''.bin''' , F'''-{len(sharded_state_dicts)+1:05d}-of-???.bin'''))
    rename_and_save_block(current_block , save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            '''.bin''' , F'''-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin''')  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin'''))
        os.rename(temp_filename , os.path.join(dump_path , shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {'''total_size''': total_size}
    index = {'''metadata''': metadata, '''weight_map''': weight_map}

    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME) , '''w''' , encoding='''utf-8''' ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True) + '''\n'''
        f.write(content)

    return metadata, index
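# Naming sketch: shards are first written as pytorch_model-00001-of-???.bin and
# renamed once the total count is known, e.g. pytorch_model-00001-of-00003.bin;
# the returned index mirrors the weight_map layout of sharded Hugging Face
# checkpoints.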
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
UpperCAmelCase_ = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check() -> None:
    """simple docstring"""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''')
    config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''')
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        '''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''')
    tokenizer = T5Tokenizer.from_pretrained('''t5-small''')
    text = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
    input_ids = tokenizer(text , return_tensors='''pt''' ).input_ids
    out = model.generate(input_ids , decoder_start_token_id=0)
print(tokenizer.decode(out[0] ) ) | 32 |
import numpy as np
def elu(vector: np.ndarray, alpha: float) -> np.ndarray:
    """simple docstring"""
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector) - 1)))
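# Quick sanity values (illustrative): positives pass through unchanged while
# x < 0 saturates toward -alpha, e.g. elu(np.array([-1.0, 0.0, 1.0]), 1.0)
# is approximately [-0.632, 0.0, 1.0] since exp(-1) - 1 ~= -0.632.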
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["MobileNetV2FeatureExtractor"]
UpperCAmelCase_ = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
"MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileNetV2ForImageClassification",
"MobileNetV2ForSemanticSegmentation",
"MobileNetV2Model",
"MobileNetV2PreTrainedModel",
"load_tf_weights_in_mobilenet_v2",
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 32 |
# fmt: off
MORSE_CODE_DICT = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """simple docstring"""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """simple docstring"""
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    """simple docstring"""
    message = '''Morse code here!'''
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main() | 32 | 1 |
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value=None):
        self.value = value
        self.parent = None  # Added in order to delete a node easier
        self.left = None
        self.right = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f'''{self.value}''': (self.left, self.right)} , indent=1)
class BinarySearchTree:
    def __init__(self, root=None):
        self.root = root

    def __str__(self):
        return str(self.root)

    def __reassign_nodes(self, node, new_children):
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node):
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self):
        return self.root is None
    def __insert(self, value):
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        new_node.parent = parent_node
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        new_node.parent = parent_node
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right

    def insert(self, *values):
        for value in values:
            self.__insert(value)
    def search(self, value):
        if self.empty():
            raise IndexError('''Warning: Tree is empty! please use another.''')
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node
    def get_max(self, node=None):
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node=None):
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node
    def remove(self, value):
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node , None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node , node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node , node.left)
            else:
                tmp_node = self.get_max(
                    node.left)  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure
    def preorder_traverse(self, node):
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None):
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr, node):
        if node:
            self.inorder(arr , node.left)
            arr.append(node.value)
            self.inorder(arr , node.right)

    def find_kth_smallest(self, k, node):
        arr = []
        self.inorder(arr , node)  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node: Node | None) -> list[Node]:
    """simple docstring"""
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
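# Example (sketch): for the tree built from (8, 3, 6, 1, 10, 14, 13, 4, 7)
# below, postorder(t.root) visits children before parents -- the leaf 1 comes
# before 3, and the root 8 comes last.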
def binary_search_tree() -> None:
    """simple docstring"""
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print('''The value 6 exists''')
    else:
        print('''The value 6 doesn\'t exist''')

    if t.search(-1) is not None:
        print('''The value -1 exists''')
    else:
        print('''The value -1 doesn\'t exist''')

    if not t.empty():
        print('''Max Value: ''' , t.get_max().value)  # type: ignore
        print('''Min Value: ''' , t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) | 32 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet2DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        """callback""",
        """latents""",
        """callback_steps""",
        """output_type""",
        """num_images_per_prompt""",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=True , use_timestep_embedding=False , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
        scheduler = IPNDMScheduler()

        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''batch_size''': 1,
            '''generator''': generator,
            '''num_inference_steps''': 4,
        }
        return inputs
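    # Note (sketch): mps does not support device-local generators, hence the
    # CPU-seeded torch.manual_seed branch above; CPU/CUDA devices get a
    # device-local torch.Generator for reproducible noise.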
    def test_dance_diffusion(self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''')
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator , num_inference_steps=100 , audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator , num_inference_steps=100 , audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 | 32 | 1 |
def solution(n: int = 10_00) -> int:
    """simple docstring"""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1 , n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
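# Worked example (a sketch): the expansions of sqrt(2) are 3/2, 7/5, 17/12,
# 41/29, ...; the 8th one, 1393/985, is the first whose numerator has more
# digits than its denominator -- exactly what the length comparison above counts.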
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
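# Usage sketch (the names above are the standard transformers Flax auto-classes):
#
#   from transformers import FlaxAutoModelForSeq2SeqLM
#   model = FlaxAutoModelForSeq2SeqLM.from_pretrained("t5-base")
#
# resolves to FlaxT5ForConditionalGeneration via the config-to-class mapping.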
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
) | 32 | 1 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self):
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()}) | 32 |
import base64


def base85_encode(string: str) -> bytes:
    """simple docstring"""
    return base64.b85encode(string.encode('''utf-8'''))


def base85_decode(a_string: bytes) -> str:
    """simple docstring"""
    return base64.b85decode(a_string).decode('''utf-8''')
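# Round trip (sketch): base85_decode(base85_encode("Hello World!")) returns
# "Hello World!". Base85 packs every 4 input bytes into 5 ASCII characters, so
# the encoded form is only about 25% larger than the input.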
if __name__ == "__main__":
UpperCAmelCase_ = "Hello World!"
UpperCAmelCase_ = baseaa_encode(test)
print(encoded)
UpperCAmelCase_ = baseaa_decode(encoded)
print(decoded) | 32 | 1 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
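# Worked numbers (a sketch): with the default scale_factor of 8, height=768
# gives 768 // 64 = 12, returned as 12 * 8 = 96 -- the latent resolution the
# UNet actually runs at; sizes that are not multiples of 64 are rounded up
# rather than truncated.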
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
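    # Note (sketch): when no latents are passed in, fresh Gaussian noise is drawn
    # and scaled by scheduler.init_noise_sigma, so the first denoising step sees
    # inputs at the variance the scheduler was trained to expect.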
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''')

        device = torch.device(f'''cuda:{gpu_id}''')
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0'''):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')

        device = torch.device(f'''cuda:{gpu_id}''')
        if self.device.type != "cpu":
            self.to('''cpu''' , silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet , '''_hf_hook'''):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , '''_hf_hook''')
                and hasattr(module._hf_hook , '''execution_device''')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds, negative_image_embeds, height=512, width=512, num_inference_steps=100, guidance_scale=4.0, num_images_per_prompt=1, generator=None, latents=None, output_type="pil", return_dict=True):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds , list):
            image_embeds = torch.cat(image_embeds , dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds , list):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=device)

        self.scheduler.set_timesteps(num_inference_steps , device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height , width , self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {'''image_embeds''': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1] , dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1)

            if not (
                hasattr(self.scheduler.config , '''variance_type''')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1] , dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]

        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True)['''sample''']

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''')

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1)
            image = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image) | 32 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class __UpperCamelCase(BaseImageProcessor):
    model_input_names = ["""pixel_values"""]
    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, do_rescale=True, rescale_factor=1 / 255, crop_size=None, do_normalize=True, image_mean=None, image_std=None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {'''height''': 224, '''width''': 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name='''crop_size''')

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image , size=size['''shortest_edge'''] , default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size['''height'''], size['''width'''])
        else:
            raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''')
        return resize(image , size=size , resample=resample , data_format=data_format , **kwargs)
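    # Usage sketch: size={"shortest_edge": 224} rescales the short side to 224
    # while keeping the aspect ratio, whereas size={"height": 224, "width": 224}
    # forces an exact output shape.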
    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image , scale=scale , data_format=data_format , **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs)
    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' , default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std) for image in images]

        images = [to_channel_dimension_format(image , data_format) for image in images]

        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors) | 32 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = """focalnet"""
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False, hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0, drop_path_rate=0.1, use_layerscale=False, layerscale_value=1e-4, use_post_layernorm=False, use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ['''stem'''] + [f'''stage{idx}''' for idx in range(1 , len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names) | 32 |
from ..utils import DummyObject, requires_backends
class __UpperCamelCase(metaclass=DummyObject):
    _backends = ["""torch""", """scipy"""]

    def __init__(self, *args, **kwargs):
        requires_backends(self , ['''torch''', '''scipy'''])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls , ['''torch''', '''scipy'''])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls , ['''torch''', '''scipy''']) | 32 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 32 |
def solution(n: int = 2_00_00_00) -> int:
    """simple docstring"""
    primality_list = [0 for _ in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
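# Sanity check on a small bound (illustrative): solution(10) counts
# 2 + 3 + 5 + 7, i.e. solution(10) == 17.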
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 1 |
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """simple docstring"""
    for i in range(0 , n):
        for _ in range(0 , n - i - 1):  # printing spaces
            print(''' ''' , end='''''')
        for _ in range(0 , i + 1):  # printing stars
            print('''* ''' , end='''''')
        print()


def reverse_floyd(n):
    """simple docstring"""
    for i in range(n , 0 , -1):
        for _ in range(i , 0 , -1):  # printing stars
            print('''* ''' , end='''''')
        print()
        for _ in range(n - i + 1 , 0 , -1):  # printing spaces
            print(''' ''' , end='''''')


def pretty_print(n):
    """simple docstring"""
    if n <= 0:
        print(''' ... .... nothing printing :(''')
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
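# Behaviour sketch: pretty_print(3) draws an upper pyramid of 1..3 stars
# followed by its mirror image -- a 6-row diamond -- while non-positive input
# is rejected with the message above.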
if __name__ == "__main__":
print(r"| /\ | |- | |- |--| |\ /| |-")
print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
print("Good Bye...") | 32 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class __UpperCamelCase ( A__ ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use GLPNImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 32 | 1 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def A__ ( ) -> List[Any]:
"""simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
return args.f
def get_results( SCREAMING_SNAKE_CASE_ : str ) -> dict:
    """simple docstring"""
    results = {}
    path = os.path.join(SCREAMING_SNAKE_CASE_ , '''all_results.json''' )
    if os.path.exists(path ):
        with open(path , '''r''' ) as f:
            results = json.load(f )
    else:
        raise ValueError(F'''can\'t find {path}''' )
    return results
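# Usage sketch for get_results (my addition, with hypothetical file contents):
# if <output_dir>/all_results.json holds {"eval_accuracy": 0.8}, then
# get_results(output_dir)["eval_accuracy"] == 0.8.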
def is_cuda_and_apex_available() -> bool:
    """simple docstring"""
    is_using_cuda = torch.cuda.is_available() and torch_device == '''cuda'''
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __UpperCamelCase ( A__ ):
@classmethod
    def setUpClass( cls ):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir , '''default_config.yml''' )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
    def tearDownClass( cls ):
        shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_glue_no_trainer( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_clm_no_trainer( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertLess(result['''perplexity'''] , 100 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_mlm_no_trainer( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertLess(result['''perplexity'''] , 42 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_ner_no_trainer( self ):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
        self.assertLess(result['''train_loss'''] , 0.5 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_squad_no_trainer( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result['''eval_f1'''] , 28 )
        self.assertGreaterEqual(result['''eval_exact'''] , 28 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_swag_no_trainer( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_summarization_no_trainer( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
        self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
        self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
        self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_translation_no_trainer( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''translation_no_trainer''' ) ) )
@slow
    def test_run_semantic_segmentation_no_trainer( self ):
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_image_classification_no_trainer( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        # The base model scores a 25%
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''step_1''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''image_classification_no_trainer''' ) ) )
| 32 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase ( A__ ):
__A : Dict = ["""image_processor""", """tokenizer"""]
__A : List[str] = """BridgeTowerImageProcessor"""
__A : str = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self , _UpperCamelCase , _UpperCamelCase ):
super().__init__(_UpperCamelCase , _UpperCamelCase )
    def __call__( self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
# add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , do_normalize=True , do_center_crop=True , **kwargs )
        encoding.update(encoding_image_processor )
return encoding
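    # Usage sketch (my addition, assuming the usual processor API): the call
    # above merges text and image features into one BatchEncoding, e.g.
    #
    #     processor = BridgeTowerProcessor(image_processor, tokenizer)
    #     encoding = processor(images=image, text="a photo", return_tensors="pt")
    #     # encoding now holds input_ids / attention_mask plus the pixel features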
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 32 | 1 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458
# Symbols
ct , x , y , z = symbols("ct x y z")
def beta( velocity : float ) -> float:
    """simple docstring"""
    if velocity > c:
        raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('''Speed must be greater than or equal to 1!''' )
    return velocity / c
def gamma( velocity : float ) -> float:
    """simple docstring"""
    return 1 / sqrt(1 - beta(velocity ) ** 2 )
def transformation_matrix( velocity : float ) -> np.ndarray:
    """simple docstring"""
    return np.array(
        [
            [gamma(velocity ), -gamma(velocity ) * beta(velocity ), 0, 0],
            [-gamma(velocity ) * beta(velocity ), gamma(velocity ), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )
def transform( velocity : float , event : np.ndarray | None = None ) -> np.ndarray:
    """simple docstring"""
    if event is None:
        event = np.array([ct, x, y, z] ) # Symbolic four vector
    else:
        event[0] *= c # x0 is ct (speed of light * time)
    return transformation_matrix(velocity ) @ event
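# Quick numeric check (my addition): at half the speed of light the Lorentz
# factor is 1 / sqrt(1 - 0.25), about 1.1547005.
assert abs(gamma(c / 2 ) - 1.1547005 ) < 1e-6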
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29_97_92_45)
print("Example of four vector: ")
print(f'''ct\' = {four_vector[0]}''')
print(f'''x\' = {four_vector[1]}''')
print(f'''y\' = {four_vector[2]}''')
print(f'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f'''\n{numerical_vector}''')
| 32 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 32 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
UpperCAmelCase_ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class __UpperCamelCase ( unittest.TestCase ):
__A : List[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__A : List[str] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__A : Any = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__A : Optional[int] = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
    def get_test_pipeline( self , model , tokenizer , processor ):
        classifier = ZeroShotClassificationPipeline(
            model=model , tokenizer=tokenizer , candidate_labels=['''polics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test( self , classifier , _UpperCamelCase ):
        outputs = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
        self.assertEqual(outputs , {'''sequence''': ANY(str ), '''labels''': [ANY(str )], '''scores''': [ANY(float )]} )
        # No kwarg
        outputs = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
        self.assertEqual(outputs , {'''sequence''': ANY(str ), '''labels''': [ANY(str )], '''scores''': [ANY(float )]} )
        outputs = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
        self.assertEqual(outputs , {'''sequence''': ANY(str ), '''labels''': [ANY(str )], '''scores''': [ANY(float )]} )
        outputs = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
        self.assertEqual(
            outputs , {'''sequence''': ANY(str ), '''labels''': [ANY(str ), ANY(str )], '''scores''': [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
        outputs = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
        self.assertEqual(
            outputs , {'''sequence''': ANY(str ), '''labels''': [ANY(str ), ANY(str )], '''scores''': [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
        outputs = classifier(
            '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
        self.assertEqual(outputs , {'''sequence''': ANY(str ), '''labels''': [ANY(str )], '''scores''': [ANY(float )]} )
        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
        self.assertEqual(
            outputs , [
                {'''sequence''': ANY(str ), '''labels''': [ANY(str ), ANY(str )], '''scores''': [ANY(float ), ANY(float )]}
                for i in range(1 )
            ] , )
        outputs = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
        self.assertEqual(
            outputs , [
                {'''sequence''': ANY(str ), '''labels''': [ANY(str ), ANY(str )], '''scores''': [ANY(float ), ANY(float )]}
                for i in range(2 )
            ] , )
        with self.assertRaises(ValueError ):
            classifier('''''' , candidate_labels='''politics''' )
        with self.assertRaises(ValueError ):
            classifier(None , candidate_labels='''politics''' )
        with self.assertRaises(ValueError ):
            classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
        with self.assertRaises(ValueError ):
            classifier('''Who are you voting for in 2020?''' , candidate_labels=None )
        with self.assertRaises(ValueError ):
            classifier(
                '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
        with self.assertRaises(ValueError ):
            classifier(
                '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=None , )
        self.run_entailment_id(classifier )
    def run_entailment_id( self , zero_shot_classifier ):
        config = zero_shot_classifier.model.config
        original_labelaid = config.labelaid
        original_entailment = zero_shot_classifier.entailment_id
        config.labelaid = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        config.labelaid = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.labelaid = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.labelaid = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        config.labelaid = original_labelaid
        self.assertEqual(original_entailment , zero_shot_classifier.entailment_id )
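    # Note (my addition): entailment_id scans config.labelaid for a label whose
    # lower-cased name starts with "entail" and returns its index, falling back
    # to -1 when no such label exists -- which is what the assertions above rely on.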
@require_torch
    def test_truncation( self ):
        zero_shot_classifier = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
    def test_small_model_pt( self ):
        zero_shot_classifier = pipeline(
            '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
        outputs = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
        self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@require_tf
    def test_small_model_tf( self ):
        zero_shot_classifier = pipeline(
            '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
        outputs = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
        self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
    def test_large_model_pt( self ):
        zero_shot_classifier = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
        outputs = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
        self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
    def test_large_model_tf( self ):
        zero_shot_classifier = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
        outputs = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
        self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
            } , )
| 32 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __UpperCamelCase ( A__ ):
__A : Any = """biogpt"""
    def __init__( self , vocab_size=42384 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.02 , layer_norm_eps=1e-12 , scale_embedding=True , use_cache=True , layerdrop=0.0 , activation_dropout=0.0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 32 | 1 |
from typing import List
from .keymap import KEYMAP, get_character
def mark( key : str ) -> List[str]:
    """simple docstring"""
    def decorator(func ):
        handle = getattr(func , '''handle_key''' , [] )
        handle += [key]
        setattr(func , '''handle_key''' , handle )
        return func
    return decorator
def mark_multiple( *keys : List[str] ) -> Dict:
    """simple docstring"""
    def decorator(func ):
        handle = getattr(func , '''handle_key''' , [] )
        handle += keys
        setattr(func , '''handle_key''' , handle )
        return func
    return decorator
class __UpperCamelCase ( A__ ):
    def __new__( cls , name , bases , attrs ):
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , '''key_handler''' ):
            setattr(new_cls , '''key_handler''' , {} )
        setattr(new_cls , '''handle_input''' , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , '''handle_key''' , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
@staticmethod
    def handle_input( cls ):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
else:
return None
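# Usage sketch (my addition, not from the original): mark / mark_multiple tag
# methods with key names, and the register helper below rebuilds a class with
# KeyHandler as its metaclass so that handle_input() dispatches on key presses:
#
#     @register
#     class Menu:
#         @mark("j")
#         def down(cls):
#             ...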
def register( cls ) -> Any:
    """simple docstring"""
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 32 |
def min_path_sum( grid : list ) -> int:
    """simple docstring"""
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''' )
    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 , len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above )
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row( current_row : list , row_above : list ) -> list:
    """simple docstring"""
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row ) ):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
    return current_row
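# Worked example (my addition): in [[1, 3], [2, 4]] the cheapest monotone path
# from the top-left to the bottom-right is 1 -> 2 -> 4, so the answer is 7.
assert min_path_sum([[1, 3], [2, 4]] ) == 7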
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 32 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , scope=None , range_bbox=1000 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config( self ):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        model = LiltModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        model = LiltForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
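    # Shape sketch (my addition): with the defaults above, inputs_dict carries
    # (13, 7) integer tensors for input_ids / token_type_ids / attention_mask
    # and a (13, 7, 4) tensor of per-token bounding boxes.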
@require_torch
class __UpperCamelCase ( A__ , A__ , A__ , unittest.TestCase ):
__A : Dict = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__A : Optional[Any] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__A : List[Any] = False
__A : Optional[int] = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        return True
    def setUp( self ):
        self.model_tester = LiltModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=37 )
    def test_config( self ):
self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@slow
class __UpperCamelCase ( unittest.TestCase ):
    def test_inference_no_head( self ):
        model = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(torch_device )
        input_ids = torch.tensor([[1, 2]] , device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids , bbox=bbox )
        expected_shape = torch.Size([1, 2, 768] )
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=torch_device , )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1e-3 ) )
| 32 | 1 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
UpperCAmelCase_ = logging.getLogger(__name__)
class __UpperCamelCase ( A__ ):
__A : Optional[Any] = """summarization"""
__A : Union[str, Any] = ["""loss"""]
__A : Any = ROUGE_KEYS
__A : Any = """rouge2"""
    def __init__( self , hparams , **kwargs ):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
            if hparams.sortish_sampler:
                raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )
        super().__init__(hparams , num_labels=None , mode=self.mode , **kwargs )
use_task_specific_params(self.model , '''summarization''' )
save_git_info(self.hparams.output_dir )
        self.metrics_save_path = Path(self.output_dir ) / '''metrics.json'''
        self.hparams_save_path = Path(self.output_dir ) / '''hparams.pkl'''
        pickle_save(self.hparams , self.hparams_save_path )
        self.step_count = 0
        self.metrics = defaultdict(list )
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
        self.dataset_kwargs = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            '''train''': self.hparams.n_train,
            '''val''': self.hparams.n_val,
            '''test''': self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        self.target_lens = {
'''train''': self.hparams.max_target_length,
'''val''': self.hparams.val_max_target_length,
'''test''': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], f'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
        self.hparams.git_sha = get_git_info()['''repo_sha''']
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , MBartTokenizer ):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch( self , batch ):
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch , Path(self.output_dir ) / '''text_batch.json''' )
        save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' )
        self.already_saved_batch = True
        return readable_batch
    def forward( self , input_ids , **kwargs ):
        return self.model(input_ids , **kwargs )
    def ids_to_clean_text( self , generated_ids ):
        gen_text = self.tokenizer.batch_decode(
            generated_ids , skip_special_tokens=True , clean_up_tokenization_spaces=True )
        return lmap(str.strip , gen_text )
    def _step( self , batch ):
        pad_token_id = self.tokenizer.pad_token_id
        src_ids , src_mask = batch['''input_ids'''], batch['''attention_mask''']
        tgt_ids = batch['''labels''']
        if isinstance(self.model , TaForConditionalGeneration ):
            decoder_input_ids = self.model._shift_right(tgt_ids )
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids , pad_token_id )
        if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
            batch['''decoder_input_ids'''] = decoder_input_ids
            self.save_readable_batch(batch )
        outputs = self(src_ids , attention_mask=src_mask , decoder_input_ids=decoder_input_ids , use_cache=False )
        lm_logits = outputs['''logits''']
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id )
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
        else:
            lprobs = nn.functional.log_softmax(lm_logits , dim=-1 )
            loss , nll_loss = label_smoothed_nll_loss(
                lprobs , tgt_ids , self.hparams.label_smoothing , ignore_index=pad_token_id )
        return (loss,)
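    # Note (my addition): shift_tokens_right prepares teacher forcing by moving
    # the labels one step to the right; in the BART implementation used here the
    # final non-pad token (usually EOS) is rotated to the front, e.g.
    # [[5, 6, 2]] with pad_token_id=1 becomes [[2, 5, 6]].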
@property
    def pad( self ):
return self.tokenizer.pad_token_id
    def training_step( self , batch , batch_idx ):
        loss_tensors = self._step(batch )
        logs = dict(zip(self.loss_names , loss_tensors ) )
        # tokens per batch
        logs['''tpb'''] = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
        logs['''bs'''] = batch['''input_ids'''].shape[0]
        logs['''src_pad_tok'''] = batch['''input_ids'''].eq(self.pad ).sum()
        logs['''src_pad_frac'''] = batch['''input_ids'''].eq(self.pad ).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step( self , batch , batch_idx ):
        return self._generative_step(batch )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase="val" ):
self.step_count += 1
_UpperCAmelCase = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
_UpperCAmelCase = losses['''loss''']
_UpperCAmelCase = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
_UpperCAmelCase = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
_UpperCAmelCase = torch.tensor(_UpperCamelCase ).type_as(_UpperCamelCase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_UpperCamelCase )
_UpperCAmelCase = {f'''{prefix}_avg_{k}''': x for k, x in losses.items()}
_UpperCAmelCase = self.step_count
self.metrics[prefix].append(_UpperCamelCase ) # callback writes this to self.metrics_save_path
_UpperCAmelCase = flatten_list([x['''preds'''] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f'''{prefix}_loss''': loss,
f'''{prefix}_{self.val_metric}''': metric_tensor,
}
    def calc_generative_metrics( self , preds , target ):
        return calculate_rouge(preds , target )
    def _generative_step( self , batch ):
        ta = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=True , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
        gen_time = (time.time() - ta) / batch['''input_ids'''].shape[0]
        preds = self.ids_to_clean_text(generated_ids )
        target = self.ids_to_clean_text(batch['''labels'''] )
        loss_tensors = self._step(batch )
        base_metrics = dict(zip(self.loss_names , loss_tensors ) )
        rouge = self.calc_generative_metrics(preds , target )
        summ_len = np.mean(lmap(len , preds ) )
        base_metrics.update(gen_time=gen_time , gen_len=summ_len , preds=preds , target=target , **rouge )
        return base_metrics
    def test_step( self , batch , batch_idx ):
        return self._generative_step(batch )
    def test_epoch_end( self , outputs ):
        return self.validation_epoch_end(outputs , prefix='''test''' )
    def get_dataset( self , type_path ):
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer , type_path=type_path , n_obs=n_obs , max_target_length=max_target_length , **self.dataset_kwargs , )
        return dataset
    def get_dataloader( self , type_path , batch_size , shuffle = False ):
        dataset = self.get_dataset(type_path )
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size , distributed=self.hparams.gpus > 1 )
            return DataLoader(
                dataset , batch_size=batch_size , collate_fn=dataset.collate_fn , shuffle=False , num_workers=self.num_workers , sampler=sampler , )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
            return DataLoader(
                dataset , batch_sampler=batch_sampler , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
        else:
            return DataLoader(
                dataset , batch_size=batch_size , collate_fn=dataset.collate_fn , shuffle=shuffle , num_workers=self.num_workers , sampler=None , )
def UpperCamelCase( self ):
_UpperCAmelCase = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=_UpperCamelCase )
return dataloader
def UpperCamelCase( self ):
return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size )
def UpperCamelCase( self ):
return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def UpperCamelCase( _UpperCamelCase , _UpperCamelCase ):
BaseTransformer.add_model_specific_args(_UpperCamelCase , _UpperCamelCase )
add_generic_args(_UpperCamelCase , _UpperCamelCase )
parser.add_argument(
'''--max_source_length''' , default=1024 , type=_UpperCamelCase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--max_target_length''' , default=56 , type=_UpperCamelCase , help=(
                '''The maximum total target sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--val_max_target_length''' , default=142 , type=_UpperCamelCase , help=(
                '''The maximum total target sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--test_max_target_length''' , default=142 , type=_UpperCamelCase , help=(
                '''The maximum total target sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument('''--freeze_encoder''' , action='''store_true''' )
parser.add_argument('''--freeze_embeds''' , action='''store_true''' )
parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=_UpperCamelCase )
parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=_UpperCamelCase )
parser.add_argument('''--max_tokens_per_batch''' , type=_UpperCamelCase , default=_UpperCamelCase )
parser.add_argument('''--logger_name''' , type=_UpperCamelCase , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' )
parser.add_argument('''--n_train''' , type=_UpperCamelCase , default=-1 , required=_UpperCamelCase , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_val''' , type=_UpperCamelCase , default=500 , required=_UpperCamelCase , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_test''' , type=_UpperCamelCase , default=-1 , required=_UpperCamelCase , help='''# examples. -1 means use all.''' )
parser.add_argument(
            '''--task''' , type=_UpperCamelCase , default='''summarization''' , required=_UpperCamelCase , help='''Task to run: summarization or translation.''' )
parser.add_argument('''--label_smoothing''' , type=_UpperCamelCase , default=0.0 , required=_UpperCamelCase )
parser.add_argument('''--src_lang''' , type=_UpperCamelCase , default='''''' , required=_UpperCamelCase )
parser.add_argument('''--tgt_lang''' , type=_UpperCamelCase , default='''''' , required=_UpperCamelCase )
parser.add_argument('''--eval_beams''' , type=_UpperCamelCase , default=_UpperCamelCase , required=_UpperCamelCase )
parser.add_argument(
'''--val_metric''' , type=_UpperCamelCase , default=_UpperCamelCase , required=_UpperCamelCase , choices=['''bleu''', '''rouge2''', '''loss''', None] )
parser.add_argument('''--eval_max_gen_length''' , type=_UpperCamelCase , default=_UpperCamelCase , help='''never generate more than n tokens''' )
parser.add_argument('''--save_top_k''' , type=_UpperCamelCase , default=1 , required=_UpperCamelCase , help='''How many checkpoints to save''' )
parser.add_argument(
'''--early_stopping_patience''' , type=_UpperCamelCase , default=-1 , required=_UpperCamelCase , help=(
'''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
                ''' val_check_interval will affect it.'''
) , )
return parser
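# Translation variant of the module above: it reads the language pair from
# the hyperparameters and monitors BLEU instead of ROUGE.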
class __UpperCamelCase ( A__ ):
__A : Dict = """translation"""
__A : Dict = ["""loss"""]
__A : str = ["""bleu"""]
__A : Tuple = """bleu"""
def __init__( self , _UpperCamelCase , **_UpperCamelCase ):
super().__init__(_UpperCamelCase , **_UpperCamelCase )
_UpperCAmelCase = hparams.src_lang
_UpperCAmelCase = hparams.tgt_lang
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ):
return calculate_bleu(_UpperCamelCase , _UpperCamelCase )
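# End-to-end entry point: build the module, pick a logger, wire up early
# stopping and checkpointing, train, and optionally run test() afterwards.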
def A__ ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any=None ) -> SummarizationModule:
"""simple docstring"""
Path(args.output_dir ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
check_output_dir(SCREAMING_SNAKE_CASE_ , expected_items=3 )
if model is None:
if "summarization" in args.task:
_UpperCAmelCase = SummarizationModule(SCREAMING_SNAKE_CASE_ )
else:
_UpperCAmelCase = TranslationModule(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('''/tmp''' )
or str(args.output_dir ).startswith('''/var''' )
):
_UpperCAmelCase = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
_UpperCAmelCase = os.environ.get('''WANDB_PROJECT''' , SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = WandbLogger(name=model.output_dir.name , project=SCREAMING_SNAKE_CASE_ )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
_UpperCAmelCase = WandbLogger(name=model.output_dir.name , project=F'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
_UpperCAmelCase = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
_UpperCAmelCase = False
_UpperCAmelCase = args.val_metric == '''loss'''
_UpperCAmelCase = generic_train(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , SCREAMING_SNAKE_CASE_ ) , early_stopping_callback=SCREAMING_SNAKE_CASE_ , logger=SCREAMING_SNAKE_CASE_ , )
pickle_save(model.hparams , model.output_dir / '''hparams.pkl''' )
if not args.do_predict:
return model
_UpperCAmelCase = ''''''
_UpperCAmelCase = sorted(glob.glob(os.path.join(args.output_dir , '''*.ckpt''' ) , recursive=SCREAMING_SNAKE_CASE_ ) )
if checkpoints:
_UpperCAmelCase = checkpoints[-1]
_UpperCAmelCase = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
UpperCAmelCase_ = pl.Trainer.add_argparse_args(parser)
UpperCAmelCase_ = SummarizationModule.add_model_specific_args(parser, os.getcwd())
UpperCAmelCase_ = parser.parse_args()
main(args) | 32 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __UpperCamelCase ( A__ ):
__A : Tuple = """rwkv"""
__A : Any = {"""max_position_embeddings""": """context_length"""}
def __init__( self , _UpperCamelCase=50277 , _UpperCamelCase=1024 , _UpperCamelCase=4096 , _UpperCamelCase=32 , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=1e-5 , _UpperCamelCase=0 , _UpperCamelCase=0 , _UpperCamelCase=6 , _UpperCamelCase=False , _UpperCamelCase=True , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = context_length
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = attention_hidden_size if attention_hidden_size is not None else hidden_size
_UpperCAmelCase = intermediate_size if intermediate_size is not None else 4 * hidden_size
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = rescale_every
_UpperCAmelCase = use_cache
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = eos_token_id
super().__init__(
tie_word_embeddings=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) | 32 | 1 |
import warnings
from functools import wraps
from typing import Callable
def A__ ( SCREAMING_SNAKE_CASE_ : Callable ) -> Callable:
"""simple docstring"""
@wraps(SCREAMING_SNAKE_CASE_ )
def _inner_fn(*SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : int ):
warnings.warn(
(F'''\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.''') , SCREAMING_SNAKE_CASE_ , )
return fn(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
return _inner_fn | 32 |
def A__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> str:
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
_UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase = max(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
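    # Zero-pad both binary strings to the same width and emit a '1' only
    # where both inputs carry a '1'.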
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 1 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCAmelCase_ = "src/diffusers"
UpperCAmelCase_ = "."
# This is to make sure the diffusers module imported is the one in the repo.
UpperCAmelCase_ = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCAmelCase_ = spec.loader.load_module()
def A__ ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple ) -> List[str]:
"""simple docstring"""
return line.startswith(SCREAMING_SNAKE_CASE_ ) or len(SCREAMING_SNAKE_CASE_ ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , SCREAMING_SNAKE_CASE_ ) is not None
def A__ ( SCREAMING_SNAKE_CASE_ : List[str] ) -> str:
"""simple docstring"""
_UpperCAmelCase = object_name.split('''.''' )
_UpperCAmelCase = 0
# First let's find the module where our object lives.
_UpperCAmelCase = parts[i]
while i < len(SCREAMING_SNAKE_CASE_ ) and not os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE_ , F'''{module}.py''' ) ):
i += 1
if i < len(SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , parts[i] )
if i >= len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , F'''{module}.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
_UpperCAmelCase = f.readlines()
# Now let's find the class / func in the code!
_UpperCAmelCase = ''''''
_UpperCAmelCase = 0
for name in parts[i + 1 :]:
while (
line_index < len(SCREAMING_SNAKE_CASE_ ) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(F''' {object_name} does not match any function or class in {module}.''' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_UpperCAmelCase = line_index
while line_index < len(SCREAMING_SNAKE_CASE_ ) and _should_continue(lines[line_index] , SCREAMING_SNAKE_CASE_ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_UpperCAmelCase = lines[start_index:line_index]
return "".join(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
UpperCAmelCase_ = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
UpperCAmelCase_ = re.compile(r"<FILL\s+[^>]*>")
def A__ ( SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = code.split('''\n''' )
_UpperCAmelCase = 0
while idx < len(SCREAMING_SNAKE_CASE_ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(SCREAMING_SNAKE_CASE_ ):
return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0]
return ""
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> int:
"""simple docstring"""
_UpperCAmelCase = len(get_indent(SCREAMING_SNAKE_CASE_ ) ) > 0
if has_indent:
_UpperCAmelCase = F'''class Bla:\n{code}'''
_UpperCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 , preview=SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = black.format_str(SCREAMING_SNAKE_CASE_ , mode=SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase , _UpperCAmelCase = style_docstrings_in_code(SCREAMING_SNAKE_CASE_ )
return result[len('''class Bla:\n''' ) :] if has_indent else result
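# Check one file: for every "Copied from" marker, re-derive the expected code
# from the original object (applying any rename directives), then report or
# overwrite any mismatch.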
def A__ ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str]=False ) -> Tuple:
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = []
_UpperCAmelCase = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = search.groups()
_UpperCAmelCase = find_code_in_diffusers(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = get_indent(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = line_index + 1 if indent == theoretical_indent else line_index + 2
_UpperCAmelCase = theoretical_indent
_UpperCAmelCase = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
_UpperCAmelCase = True
while line_index < len(SCREAMING_SNAKE_CASE_ ) and should_continue:
line_index += 1
if line_index >= len(SCREAMING_SNAKE_CASE_ ):
break
_UpperCAmelCase = lines[line_index]
_UpperCAmelCase = _should_continue(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and re.search(F'''^{indent}# End copy''' , SCREAMING_SNAKE_CASE_ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_UpperCAmelCase = lines[start_index:line_index]
_UpperCAmelCase = ''''''.join(SCREAMING_SNAKE_CASE_ )
# Remove any nested `Copied from` comments to avoid circular copies
_UpperCAmelCase = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(SCREAMING_SNAKE_CASE_ ) is None]
_UpperCAmelCase = '''\n'''.join(SCREAMING_SNAKE_CASE_ )
# Before comparing, use the `replace_pattern` on the original code.
if len(SCREAMING_SNAKE_CASE_ ) > 0:
_UpperCAmelCase = replace_pattern.replace('''with''' , '''''' ).split(''',''' )
_UpperCAmelCase = [_re_replace_pattern.search(SCREAMING_SNAKE_CASE_ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = pattern.groups()
_UpperCAmelCase = re.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if option.strip() == "all-casing":
_UpperCAmelCase = re.sub(obja.lower() , obja.lower() , SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = re.sub(obja.upper() , obja.upper() , SCREAMING_SNAKE_CASE_ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_UpperCAmelCase = blackify(lines[start_index - 1] + theoretical_code )
_UpperCAmelCase = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_UpperCAmelCase = lines[:start_index] + [theoretical_code] + lines[line_index:]
_UpperCAmelCase = start_index + 1
if overwrite and len(SCREAMING_SNAKE_CASE_ ) > 0:
# Warn the user a file has been modified.
print(F'''Detected changes, rewriting {filename}.''' )
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(SCREAMING_SNAKE_CASE_ )
return diffs
def A__ ( SCREAMING_SNAKE_CASE_ : bool = False ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = glob.glob(os.path.join(SCREAMING_SNAKE_CASE_ , '''**/*.py''' ) , recursive=SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = []
for filename in all_files:
_UpperCAmelCase = is_copy_consistent(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
if not overwrite and len(SCREAMING_SNAKE_CASE_ ) > 0:
_UpperCAmelCase = '''\n'''.join(SCREAMING_SNAKE_CASE_ )
raise Exception(
'''Found the following copy inconsistencies:\n'''
+ diff
+ '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
UpperCAmelCase_ = parser.parse_args()
check_copies(args.fix_and_overwrite) | 32 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class __UpperCamelCase ( A__ ):
__A : Dict = """falcon"""
__A : Any = ["""past_key_values"""]
def __init__( self , _UpperCamelCase=65024 , _UpperCamelCase=4544 , _UpperCamelCase=32 , _UpperCamelCase=71 , _UpperCamelCase=1e-5 , _UpperCamelCase=0.02 , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=11 , _UpperCamelCase=11 , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCAmelCase = kwargs.pop('''n_embed''' , _UpperCamelCase )
_UpperCAmelCase = hidden_size if n_embed is None else n_embed
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = use_cache
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = num_attention_heads if num_kv_heads is None else num_kv_heads
_UpperCAmelCase = alibi
_UpperCAmelCase = new_decoder_architecture
_UpperCAmelCase = multi_query # Ignored when new_decoder_architecture is True
_UpperCAmelCase = parallel_attn
_UpperCAmelCase = bias
super().__init__(bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
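    # Derived properties: the per-head dimension, and whether rotary position
    # embeddings apply (they do whenever ALiBi is disabled).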
@property
def UpperCamelCase( self ):
return self.hidden_size // self.num_attention_heads
@property
def UpperCamelCase( self ):
return not self.alibi | 32 | 1 |
def A__ ( SCREAMING_SNAKE_CASE_ : int = 10_00 ) -> int:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = 1, 1
_UpperCAmelCase = 2
while True:
_UpperCAmelCase = 0
_UpperCAmelCase = fa + fa
_UpperCAmelCase , _UpperCAmelCase = fa, f
index += 1
        # count the digits of the newly computed Fibonacci value
        for _ in str(f ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 32 |
from math import sqrt
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
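# Return the nth prime by trial division with the 6k +/- 1 optimisation
# (Project Euler 7 asks for the 10001st prime, hence the default).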
def A__ ( SCREAMING_SNAKE_CASE_ : int = 1_00_01 ) -> int:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = 1
while count != nth and number < 3:
number += 1
if is_prime(SCREAMING_SNAKE_CASE_ ):
count += 1
while count != nth:
number += 2
if is_prime(SCREAMING_SNAKE_CASE_ ):
count += 1
return number
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase_ = 16
UpperCAmelCase_ = 32
def A__ ( SCREAMING_SNAKE_CASE_ : Accelerator , SCREAMING_SNAKE_CASE_ : int = 16 ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' )
_UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(SCREAMING_SNAKE_CASE_ : Dict ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_UpperCAmelCase = datasets.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(SCREAMING_SNAKE_CASE_ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
_UpperCAmelCase = 8
else:
_UpperCAmelCase = None
return tokenizer.pad(
SCREAMING_SNAKE_CASE_ , padding='''longest''' , max_length=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , )
# Instantiate dataloaders.
_UpperCAmelCase = DataLoader(
tokenized_datasets['''train'''] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCAmelCase_ = mocked_dataloaders # noqa: F811
def A__ ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , SCREAMING_SNAKE_CASE_ ) == "1":
_UpperCAmelCase = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
_UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
_UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase = config['''lr''']
_UpperCAmelCase = int(config['''num_epochs'''] )
_UpperCAmelCase = int(config['''seed'''] )
_UpperCAmelCase = int(config['''batch_size'''] )
set_seed(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase , _UpperCAmelCase = get_dataloaders(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
_UpperCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
_UpperCAmelCase = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=SCREAMING_SNAKE_CASE_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
_UpperCAmelCase = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE_ )
# Instantiate scheduler
_UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE_ , num_warmup_steps=1_00 , num_training_steps=(len(SCREAMING_SNAKE_CASE_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
_UpperCAmelCase = os.path.split(SCREAMING_SNAKE_CASE_ )[-1].split('''.''' )[0]
accelerator.init_trackers(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE_ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
_UpperCAmelCase = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
_UpperCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
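        # Evaluation: run the model without gradients, gather predictions and
        # labels across processes, and feed them into the GLUE metric.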
model.eval()
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = outputs.logits.argmax(dim=-1 )
_UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ , )
_UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , SCREAMING_SNAKE_CASE_ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(SCREAMING_SNAKE_CASE_ ),
'''epoch''': epoch,
} , step=SCREAMING_SNAKE_CASE_ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def A__ ( ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        ''' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'''
        ''' and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
        '''--project_dir''' , type=SCREAMING_SNAKE_CASE_ , default='''logs''' , help='''Where to store experiment tracking logs and relevant project information.''' , )
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main() | 32 |
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = F'''Input value of [number={number}] must be an integer'''
raise TypeError(SCREAMING_SNAKE_CASE_ )
if number < 0:
return False
_UpperCAmelCase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 1 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = "▁"
UpperCAmelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( A__ , unittest.TestCase ):
__A : Tuple = BigBirdTokenizer
__A : List[str] = BigBirdTokenizerFast
__A : List[Any] = True
__A : List[Any] = True
def UpperCamelCase( self ):
super().setUp()
_UpperCAmelCase = self.tokenizer_class(_UpperCamelCase , keep_accents=_UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase( self ):
_UpperCAmelCase = '''<s>'''
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase ) , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(_UpperCamelCase ) , 1004 )
def UpperCamelCase( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def UpperCamelCase( self ):
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = '''I was born in 92000, and this is falsé.'''
_UpperCAmelCase = tokenizer.tokenize(_UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(_UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.encode(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = BigBirdTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase )
_UpperCAmelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [285, 46, 10, 170, 382] , )
_UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def UpperCamelCase( self ):
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
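    # The @slow tests below exercise the pretrained google/bigbird-roberta-base
    # tokenizer end to end.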
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = '''Hello World!'''
_UpperCAmelCase = [65, 18536, 2260, 101, 66]
self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase ) )
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
_UpperCAmelCase = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase ) )
@require_torch
@slow
def UpperCamelCase( self ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
_UpperCAmelCase = list(self.big_tokenizer.get_vocab().keys() )[:10]
_UpperCAmelCase = ''' '''.join(_UpperCamelCase )
_UpperCAmelCase = self.big_tokenizer.encode_plus(_UpperCamelCase , return_tensors='''pt''' , return_token_type_ids=_UpperCamelCase )
_UpperCAmelCase = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_UpperCamelCase )
_UpperCAmelCase = BigBirdConfig(attention_type='''original_full''' )
_UpperCAmelCase = BigBirdModel(_UpperCamelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_UpperCamelCase )
model(**_UpperCamelCase )
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
_UpperCAmelCase = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def UpperCamelCase( self ):
# fmt: off
_UpperCAmelCase = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCamelCase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , ) | 32 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class __UpperCamelCase ( A__ ):
__A : str = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
__A : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
__A : ClassVar[Features] = Features({} )
__A : str = "text"
@property
def UpperCamelCase( self ):
return {self.text_column: "text"} | 32 | 1 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=32 , _UpperCamelCase=2 , _UpperCamelCase=3 , _UpperCamelCase=16 , _UpperCamelCase=[32, 64, 128] , _UpperCamelCase=[1, 2, 1] , _UpperCamelCase=[2, 2, 4] , _UpperCamelCase=2 , _UpperCamelCase=2.0 , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=0.1 , _UpperCamelCase="gelu" , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=0.02 , _UpperCamelCase=1e-5 , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=10 , _UpperCamelCase=8 , _UpperCamelCase=["stage1", "stage2"] , _UpperCamelCase=[1, 2] , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = patch_norm
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = is_training
_UpperCAmelCase = scope
_UpperCAmelCase = use_labels
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = encoder_stride
_UpperCAmelCase = out_features
_UpperCAmelCase = out_indices
def UpperCamelCase( self ):
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCamelCase( self ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = FocalNetModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase )
_UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = FocalNetBackbone(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_UpperCAmelCase = None
_UpperCAmelCase = FocalNetBackbone(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = FocalNetForMaskedImageModeling(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = FocalNetForMaskedImageModeling(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase = model(_UpperCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = self.type_sequence_label_size
_UpperCAmelCase = FocalNetForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = FocalNetForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase( self ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
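# The suite below drives the tiny configurations built by the tester above:
# the shared ModelTesterMixin checks plus FocalNet-specific tests for
# hidden-state shapes, padded inputs, and the backbone.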
@require_torch
class __UpperCamelCase ( A__ , A__ , unittest.TestCase ):
__A : List[str] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
__A : Union[str, Any] = (
{"""feature-extraction""": FocalNetModel, """image-classification""": FocalNetForImageClassification}
if is_torch_available()
else {}
)
__A : int = False
__A : int = False
__A : Dict = False
__A : str = False
__A : Tuple = False
def UpperCamelCase( self ):
_UpperCAmelCase = FocalNetModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , embed_dim=37 , has_text_modality=_UpperCamelCase )
def UpperCamelCase( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase( self ):
return
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
@unittest.skip(reason='''FocalNet does not use inputs_embeds''' )
def UpperCamelCase( self ):
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''' )
def UpperCamelCase( self ):
pass
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_UpperCAmelCase = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_UpperCAmelCase = model_class(_UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
# FocalNet has a different seq_length
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_UpperCAmelCase = outputs.reshaped_hidden_states
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = reshaped_hidden_states[0].shape
_UpperCAmelCase = (
reshaped_hidden_states[0].view(_UpperCamelCase , _UpperCamelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_UpperCAmelCase = True
self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_UpperCAmelCase = True
self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , (padded_height, padded_width) )
@slow
def UpperCamelCase( self ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = FocalNetModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = _config_zero_init(_UpperCamelCase )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(config=_UpperCamelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
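# Integration test: classify the standard COCO test image with
# microsoft/focalnet-tiny and compare the logits against reference values.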
@require_vision
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def UpperCamelCase( self ):
# TODO update organization
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(_UpperCamelCase )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_UpperCAmelCase = image_processor(images=_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**_UpperCamelCase )
# verify the logits
_UpperCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
_UpperCAmelCase = torch.tensor([0.2166, -0.4368, 0.2191] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class __UpperCamelCase ( A__ , unittest.TestCase ):
__A : Optional[int] = (FocalNetBackbone,) if is_torch_available() else ()
__A : str = FocalNetConfig
__A : Any = False
def UpperCamelCase( self ):
_UpperCAmelCase = FocalNetModelTester(self ) | 32 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"vocab_file": "spiece.model"}
UpperCAmelCase_ = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
UpperCAmelCase_ = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
UpperCAmelCase_ = "▁"
class TaTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , sp_model_kwargs = None , legacy=True , **kwargs , ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f'''<extra_id_{i}>''' for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda token : bool('''extra_id''' in str(token ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
                    ''' tokens''' )
        if legacy:
            logger.warning_once(
                f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
                ''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , legacy=legacy , **kwargs , )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@staticmethod
    def UpperCamelCase( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _UpperCamelCase , )
return max_model_length
    @property
    def vocab_size( self ):
        return self.sp_model.get_piece_size() + self._extra_ids
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a , token_ids_b , already_has_special_tokens=True )
        # normal case: some special tokens
        if token_ids_b is None:
            return ([0] * len(token_ids_a )) + [1]
        return ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
    def get_sentinel_tokens( self ):
        return list(
            set(filter(lambda token : bool(re.search(R'''<extra_id_\d+>''' , token ) ) is not None , self.additional_special_tokens ) ) )
    def get_sentinel_token_ids( self ):
        return [self._convert_token_to_id(token ) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present( self , token_ids ):
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
                ''' eos tokens being added.''' )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ):
        token_ids_a = self._add_eos_if_not_present(token_ids_a )
        if token_ids_b is None:
            return token_ids_a
        else:
            token_ids_b = self._add_eos_if_not_present(token_ids_b )
            return token_ids_a + token_ids_b
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , _UpperCamelCase ):
        self.__dict__ = _UpperCamelCase
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def tokenize( self , text , **kwargs ):
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE , ''' ''' )
        return super().tokenize(text , **kwargs )
    def _tokenize( self , text , **kwargs ):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE )
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text , out_type=str )
        if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(SPIECE_UNDERLINE ):
            tokens = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
        return tokens
    def _convert_token_to_id( self , token ):
        if token.startswith('''<extra_id_''' ):
            match = re.match(R'''<extra_id_(\d+)>''' , token )
            num = int(match.group(1 ) )
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self , index ):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        else:
            token = f'''<extra_id_{self.vocab_size - 1 - index}>'''
        return token
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self , _UpperCamelCase , _UpperCamelCase=7 , _UpperCamelCase=3 , _UpperCamelCase=18 , _UpperCamelCase=30 , _UpperCamelCase=400 , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=[0.5, 0.5, 0.5] , _UpperCamelCase=[0.5, 0.5, 0.5] , ):
_UpperCAmelCase = size if size is not None else {'''shortest_edge''': 18}
_UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
def UpperCamelCase( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __UpperCamelCase ( A__ , unittest.TestCase ):
__A : int = LevitImageProcessor if is_vision_available() else None
def UpperCamelCase( self ):
_UpperCAmelCase = LevitImageProcessingTester(self )
@property
def UpperCamelCase( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase( self ):
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_UpperCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(_UpperCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_UpperCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_UpperCamelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(_UpperCamelCase , '''size''' ) )
def UpperCamelCase( self ):
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def UpperCamelCase( self ):
pass
def UpperCamelCase( self ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCAmelCase = image_processing(_UpperCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCamelCase( self ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCAmelCase = image_processing(_UpperCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCamelCase( self ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCAmelCase = image_processing(_UpperCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , ) | 32 |
from __future__ import annotations
def is_9_pandigital(number: int ) -> bool:
    """Return True if `number` has exactly nine digits and uses each of 1-9 once."""
    digits = str(number )
    return len(digits ) == 9 and set(digits ) == set('''123456789''' )
def solution() -> int | None:
    """Largest 1-9 pandigital number formed as a concatenated product (Project Euler 38)."""
    # a 4-digit base: 100002 * base == int(str(base) + str(2 * base)), a 9-digit concatenation
    for base_num in range(99_99 , 49_99 , -1 ):
        candidate = 10_00_02 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    # a 3-digit base: 1002003 * base == int(str(base) + str(2 * base) + str(3 * base))
    for base_num in range(3_33 , 99 , -1 ):
        candidate = 1_00_20_03 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    return None
if __name__ == "__main__":
    print(f'''{solution() = }''' )
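# Worked example: 9327 * 1 = 9327 and 9327 * 2 = 18654 concatenate to 932718654,
# a 1-9 pandigital number, so 100002 * 9327 == 932718654 is the value returned above.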
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def get_week_day(year: int , month: int , day: int ) -> str:
    """Return the week-day name for the given date via the Doomsday algorithm.

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    """
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 1_00
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 1_00
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # a year is leap iff divisible by 4 and (not by 100, unless also by 400)
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
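# Example trace for get_week_day(2020, 10, 24): century anchor 2, doomsday 6,
# October leap anchor 3, so (6 + 24 - 3) % 7 == 6, i.e. "Saturday".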
import numpy as np
def exponential_linear_unit(vector: np.ndarray , alpha: float ) -> np.ndarray:
    """ELU activation: returns x where x > 0, and alpha * (exp(x) - 1) elsewhere."""
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
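# Quick numeric check: exponential_linear_unit(np.array([1.0, -1.0]), alpha=1.0)
# yields approximately [1.0, -0.6321], since exp(-1) - 1 is about -0.6321.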
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( A__ ):
__A : List[str] = (UniPCMultistepScheduler,)
__A : List[str] = (("""num_inference_steps""", 25),)
def UpperCamelCase( self , **_UpperCamelCase ):
_UpperCAmelCase = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**_UpperCamelCase )
return config
def UpperCamelCase( self , _UpperCamelCase=0 , **_UpperCamelCase ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop('''num_inference_steps''' , _UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**_UpperCamelCase )
_UpperCAmelCase = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(_UpperCamelCase )
new_scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase , _UpperCAmelCase = sample, sample
for t in range(_UpperCamelCase , time_step + scheduler.config.solver_order + 1 ):
_UpperCAmelCase = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase( self , _UpperCamelCase=0 , **_UpperCamelCase ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop('''num_inference_steps''' , _UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(_UpperCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residual (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase( self , _UpperCamelCase=None , **_UpperCamelCase ):
if scheduler is None:
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**_UpperCamelCase )
_UpperCAmelCase = scheduler_class(**_UpperCamelCase )
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**_UpperCamelCase )
_UpperCAmelCase = scheduler_class(**_UpperCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ).prev_sample
return sample
def UpperCamelCase( self ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop('''num_inference_steps''' , _UpperCamelCase )
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**_UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCamelCase , '''set_timesteps''' ):
scheduler.set_timesteps(_UpperCamelCase )
elif num_inference_steps is not None and not hasattr(_UpperCamelCase , '''set_timesteps''' ):
_UpperCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
_UpperCAmelCase = scheduler.timesteps[5]
_UpperCAmelCase = scheduler.timesteps[6]
_UpperCAmelCase = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
_UpperCAmelCase = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase( self ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_UpperCAmelCase = UniPCMultistepScheduler(**self.get_scheduler_config() )
_UpperCAmelCase = self.full_loop(scheduler=_UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
_UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = self.full_loop(scheduler=_UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCamelCase( self ):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase )
def UpperCamelCase( self ):
self.check_over_configs(thresholding=_UpperCamelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_UpperCamelCase , prediction_type=_UpperCamelCase , sample_max_value=_UpperCamelCase , solver_order=_UpperCamelCase , solver_type=_UpperCamelCase , )
def UpperCamelCase( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase )
def UpperCamelCase( self ):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_UpperCamelCase , solver_type=_UpperCamelCase , prediction_type=_UpperCamelCase , )
_UpperCAmelCase = self.full_loop(
solver_order=_UpperCamelCase , solver_type=_UpperCamelCase , prediction_type=_UpperCamelCase , )
assert not torch.isnan(_UpperCamelCase ).any(), "Samples have nan numbers"
def UpperCamelCase( self ):
self.check_over_configs(lower_order_final=_UpperCamelCase )
self.check_over_configs(lower_order_final=_UpperCamelCase )
def UpperCamelCase( self ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_UpperCamelCase , time_step=0 )
def UpperCamelCase( self ):
_UpperCAmelCase = self.full_loop()
_UpperCAmelCase = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCamelCase( self ):
_UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' )
_UpperCAmelCase = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_mean.item() - 0.1014 ) < 1e-3
def UpperCamelCase( self ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(thresholding=_UpperCamelCase , dynamic_thresholding_ratio=0 )
_UpperCAmelCase = scheduler_class(**_UpperCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ).prev_sample
assert sample.dtype == torch.floataa
def UpperCamelCase( self , **_UpperCamelCase ):
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**_UpperCamelCase )
_UpperCAmelCase = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps | 32 |
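        # "bh1" and "bh2" select the B(h) choice in UniPC's corrector update, while
        # solver_order (1-3) sets how many past model outputs the multistep rule reuses.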
# fmt: off
MORSE_CODE_DICT = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str ) -> str:
    """Translate a plain-text message into Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def decrypt(message: str ) -> str:
    """Translate a Morse-code message back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split() )
def main() -> None:
    """Round-trip a sample message through encrypt and decrypt."""
    message = '''Morse code here!'''
    print(message )
    message = encrypt(message )
    print(message )
    message = decrypt(message )
    print(message )
if __name__ == "__main__":
    main()
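# Example: encrypt("SOS") -> "... --- ..." and decrypt("... --- ...") -> "SOS";
# spaces in the plain text map to "/" so word boundaries survive the round trip.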
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig ( BackboneConfigMixin , PretrainedConfig ):
    model_type = """swin"""
    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ['''stem'''] + [f'''stage{idx}''' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class SwinOnnxConfig ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs( self ):
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def atol_for_validation( self ):
        return 1e-4
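# Example: with the defaults above (embed_dim=96, four stages), hidden_size
# resolves to int(96 * 2 ** 3) == 768, the channel width after the last stage.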
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( A__ , unittest.TestCase ):
__A : Any = DanceDiffusionPipeline
__A : Any = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__A : Tuple = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
__A : Tuple = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__A : List[str] = False
__A : str = False
def UpperCamelCase( self ):
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_UpperCamelCase , use_timestep_embedding=_UpperCamelCase , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
_UpperCAmelCase = IPNDMScheduler()
_UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=0 ):
if str(_UpperCamelCase ).startswith('''mps''' ):
_UpperCAmelCase = torch.manual_seed(_UpperCamelCase )
else:
_UpperCAmelCase = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
_UpperCAmelCase = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 4,
}
return inputs
def UpperCamelCase( self ):
_UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = DanceDiffusionPipeline(**_UpperCamelCase )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = self.get_dummy_inputs(_UpperCamelCase )
_UpperCAmelCase = pipe(**_UpperCamelCase )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_UpperCAmelCase = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCamelCase( self ):
return super().test_save_load_local()
@skip_mps
def UpperCamelCase( self ):
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def UpperCamelCase( self ):
return super().test_save_load_optional_components()
@skip_mps
def UpperCamelCase( self ):
return super().test_attention_slicing_forward_pass()
def UpperCamelCase( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase( self ):
_UpperCAmelCase = torch_device
_UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCAmelCase = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase( self ):
_UpperCAmelCase = torch_device
_UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCAmelCase = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 | 32 | 1 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_50_00_00 ) -> int:
    """Count perimeters below `limit` that admit exactly one integer right triangle."""
    frequencies: defaultdict = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
    print(f'''{solution() = }''' )
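# Why this works: coprime m > n of opposite parity generate every primitive triple
# (m^2 - n^2, 2mn, m^2 + n^2), whose perimeter is 2m(m + n); tallying multiples of
# each primitive perimeter leaves the exactly-once perimeters as the answers counted.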
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
UpperCAmelCase_ = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING
FlaxAutoModel = auto_class_update(FlaxAutoModel)
class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class FlaxAutoModelForSeqaSeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
FlaxAutoModelForSeqaSeqLM = auto_class_update(
    FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)
class FlaxAutoModelForVisionaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
FlaxAutoModelForVisionaSeq = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class FlaxAutoModelForSpeechSeqaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
FlaxAutoModelForSpeechSeqaSeq = auto_class_update(
    FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig ( XLMRobertaConfig ):
    model_type = """M-CLIP"""
    def __init__( self , transformerDimSize=1024 , imageDimSize=768 , **kwargs ):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )
class MultilingualCLIP ( PreTrainedModel ):
    config_class = MCLIPConfig
    def __init__( self , config , *args , **kwargs ):
        super().__init__(config , *args , **kwargs )
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )
    def forward( self , input_ids , attention_mask ):
        embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask )[0]
        # attention-mask-weighted mean pooling over the token embeddings
        embs = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(embs ), embs
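# Usage sketch (the checkpoint name is illustrative, not taken from this file):
#   model = MultilingualCLIP.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#   projected, pooled = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])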
import baseaa
def baseaa_encode(string: str ) -> bytes:
    """Encode a UTF-8 string to bytes with the wrapped codec."""
    return baseaa.baaencode(string.encode('''utf-8''' ) )
def baseaa_decode(encoded: bytes ) -> str:
    """Decode the encoded bytes back into a UTF-8 string."""
    return baseaa.baadecode(encoded ).decode('''utf-8''' )
if __name__ == "__main__":
    test = "Hello World!"
    encoded = baseaa_encode(test )
    print(encoded )
    decoded = baseaa_decode(encoded )
    print(decoded )
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( A__ ):
__A : Union[str, Any] = (DDPMParallelScheduler,)
def UpperCamelCase( self , **_UpperCamelCase ):
_UpperCAmelCase = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**_UpperCamelCase )
return config
def UpperCamelCase( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase )
def UpperCamelCase( self ):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase )
def UpperCamelCase( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_UpperCamelCase )
def UpperCamelCase( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_UpperCamelCase )
def UpperCamelCase( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_UpperCamelCase )
def UpperCamelCase( self ):
self.check_over_configs(thresholding=_UpperCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_UpperCamelCase , prediction_type=_UpperCamelCase , sample_max_value=_UpperCamelCase , )
def UpperCamelCase( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase )
def UpperCamelCase( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**_UpperCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def UpperCamelCase( self ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**_UpperCamelCase )
_UpperCAmelCase = len(_UpperCamelCase )
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
_UpperCAmelCase = self.dummy_sample_deter + 0.1
_UpperCAmelCase = self.dummy_sample_deter - 0.1
_UpperCAmelCase = samplea.shape[0]
_UpperCAmelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
_UpperCAmelCase = torch.arange(_UpperCamelCase )[0:3, None].repeat(1 , _UpperCamelCase )
_UpperCAmelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_UpperCAmelCase = scheduler.batch_step_no_noise(_UpperCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
_UpperCAmelCase = torch.sum(torch.abs(_UpperCamelCase ) )
_UpperCAmelCase = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_sum.item() - 1153.1833 ) < 1e-2
assert abs(result_mean.item() - 0.5005 ) < 1e-3
def UpperCamelCase( self ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**_UpperCamelCase )
_UpperCAmelCase = len(_UpperCamelCase )
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
_UpperCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_UpperCamelCase ) ):
# 1. predict noise residual
_UpperCAmelCase = model(_UpperCamelCase , _UpperCamelCase )
# 2. predict previous mean of sample x_t-1
_UpperCAmelCase = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase ).prev_sample
_UpperCAmelCase = pred_prev_sample
_UpperCAmelCase = torch.sum(torch.abs(_UpperCamelCase ) )
_UpperCAmelCase = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def UpperCamelCase( self ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
_UpperCAmelCase = scheduler_class(**_UpperCamelCase )
_UpperCAmelCase = len(_UpperCamelCase )
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
_UpperCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_UpperCamelCase ) ):
# 1. predict noise residual
_UpperCAmelCase = model(_UpperCamelCase , _UpperCamelCase )
# 2. predict previous mean of sample x_t-1
_UpperCAmelCase = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase ).prev_sample
_UpperCAmelCase = pred_prev_sample
_UpperCAmelCase = torch.sum(torch.abs(_UpperCamelCase ) )
_UpperCAmelCase = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def UpperCamelCase( self ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**_UpperCamelCase )
_UpperCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_UpperCamelCase )
_UpperCAmelCase = scheduler.timesteps
for i, timestep in enumerate(_UpperCamelCase ):
if i == len(_UpperCamelCase ) - 1:
_UpperCAmelCase = -1
else:
_UpperCAmelCase = timesteps[i + 1]
_UpperCAmelCase = scheduler.previous_timestep(_UpperCamelCase )
_UpperCAmelCase = prev_t.item()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**_UpperCamelCase )
_UpperCAmelCase = [100, 87, 50, 51, 0]
with self.assertRaises(_UpperCamelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**_UpperCamelCase )
_UpperCAmelCase = [100, 87, 50, 1, 0]
_UpperCAmelCase = len(_UpperCamelCase )
with self.assertRaises(_UpperCamelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_UpperCamelCase , timesteps=_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**_UpperCamelCase )
_UpperCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _UpperCamelCase , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=_UpperCamelCase ) | 32 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase_ = logging.get_logger(__name__)
class __UpperCamelCase ( BaseImageProcessor ):
    model_input_names = ["""pixel_values"""]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , do_rescale = True , rescale_factor = 1 / 255 , crop_size = None , do_normalize = True , image_mean = None , image_std = None , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 224, '''width''': 224}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name='''crop_size''' )
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ):
        size = get_size_dict(size )
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image , size=size['''shortest_edge'''] , default_to_square=False )
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size['''height'''], size['''width'''])
        else:
            raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
        return resize(image , size=size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ):
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(_UpperCamelCase , param_name='''crop_size''' , default_to_square=_UpperCamelCase )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if not is_batched(_UpperCamelCase ):
_UpperCAmelCase = [images]
if not valid_images(_UpperCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(_UpperCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
_UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase ) | 32 | 1 |
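# A minimal, self-contained numpy sketch of the rescale -> normalize ->
# channel-first steps that preprocess() above delegates to the image_transforms
# helpers. The mean/std are the IMAGENET defaults the class falls back to;
# everything else is a simplified stand-in, not the library implementation.
import numpy as np
_IMAGENET_DEFAULT_MEAN = np.array([0.485, 0.456, 0.406])
_IMAGENET_DEFAULT_STD = np.array([0.229, 0.224, 0.225])
def _preprocess_sketch(image: np.ndarray) -> np.ndarray:
    image = image.astype(np.float32) / 255.0  # rescale_factor = 1 / 255
    image = (image - _IMAGENET_DEFAULT_MEAN) / _IMAGENET_DEFAULT_STD  # per-channel, HWC layout
    return image.transpose(2, 0, 1)  # ChannelDimension.FIRST: HWC -> CHW
# _preprocess_sketch(np.zeros((224, 224, 3), dtype=np.uint8)).shape  # -> (3, 224, 224)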
import socket
def A__ ( ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
_UpperCAmelCase = socket.gethostname()
_UpperCAmelCase = 1_23_12
sock.connect((host, port) )
sock.send(B'''Hello server!''' )
with open('''Received_file''' , '''wb''' ) as out_file:
print('''File opened''' )
print('''Receiving data...''' )
while True:
_UpperCAmelCase = sock.recv(10_24 )
if not data:
break
out_file.write(SCREAMING_SNAKE_CASE_ )
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
if __name__ == "__main__":
main() | 32 |
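# A minimal server-side counterpart this client assumes: listen on the same
# host/port, consume the greeting, and stream a file back. The filename and
# function name are hypothetical; this is a sketch, not part of the original module.
def serve_file(filename: str = "File_to_send", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    conn.recv(1024)  # the client's "Hello server!" greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()
    server.close()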
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=A__ ):
__A : str = ["""torch""", """scipy"""]
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''torch''', '''scipy'''] ) | 32 | 1 |
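# A simplified sketch of what requires_backends checks above: each listed
# backend must be importable, otherwise a helpful ImportError is raised. This
# is a stand-in for the real utility, not its implementation.
import importlib.util
def _requires_backends_sketch(name: str, backends: list) -> None:
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the following backends: {missing}")
# _requires_backends_sketch("SomeTorchScipyClass", ["torch", "scipy"])  # hypothetical class name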
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = "▁"
UpperCAmelCase_ = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
UpperCAmelCase_ = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
UpperCAmelCase_ = {"vinai/bartpho-syllable": 10_24}
class __UpperCamelCase ( A__ ):
__A : List[Any] = VOCAB_FILES_NAMES
__A : Dict = PRETRAINED_VOCAB_FILES_MAP
__A : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : int = ["""input_ids""", """attention_mask"""]
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase="<s>" , _UpperCamelCase="</s>" , _UpperCamelCase="</s>" , _UpperCamelCase="<s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase="<mask>" , _UpperCamelCase = None , **_UpperCamelCase , ):
# Mask token behave like a normal word, i.e. include the space before it
_UpperCAmelCase = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = monolingual_vocab_file
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_UpperCAmelCase = {}
_UpperCAmelCase = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(_UpperCamelCase ) not in self.fairseq_tokens_to_ids:
_UpperCAmelCase = cnt
cnt += 1
with open(_UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
_UpperCAmelCase = line.strip().split()[0]
_UpperCAmelCase = len(self.fairseq_tokens_to_ids )
if str(_UpperCamelCase ) not in self.fairseq_tokens_to_ids:
_UpperCAmelCase = len(self.fairseq_tokens_to_ids )
_UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
_UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , _UpperCamelCase ):
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
_UpperCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCamelCase )) + [1]
return [1] + ([0] * len(_UpperCamelCase )) + [1, 1] + ([0] * len(_UpperCamelCase )) + [1]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase( self ):
return len(self.fairseq_ids_to_tokens )
def UpperCamelCase( self ):
_UpperCAmelCase = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase( self , _UpperCamelCase ):
return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def UpperCamelCase( self , _UpperCamelCase ):
return self.fairseq_ids_to_tokens[index]
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = ''''''.join(_UpperCamelCase ).replace(_UpperCamelCase , ''' ''' ).strip()
return out_string
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
if not os.path.isdir(_UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase = os.path.join(
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCAmelCase = os.path.join(
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , '''wb''' ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
_UpperCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(_UpperCamelCase , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(_UpperCamelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file | 32 |
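# A tiny, self-contained illustration of the special-token layout that
# build_inputs_with_special_tokens produces above (hypothetical ids:
# cls=<s>=0, sep=</s>=2): single -> <s> A </s>, pair -> <s> A </s></s> B </s>.
_cls, _sep = [0], [2]
_seq_a, _seq_b = [10, 11], [20, 21]
assert _cls + _seq_a + _sep == [0, 10, 11, 2]
assert _cls + _seq_a + _sep + _sep + _seq_b + _sep == [0, 10, 11, 2, 2, 20, 21, 2]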
def A__ ( SCREAMING_SNAKE_CASE_ : int = 2_00_00_00 ) -> int:
"""simple docstring"""
_UpperCAmelCase = [0 for i in range(n + 1 )]
_UpperCAmelCase = 1
_UpperCAmelCase = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , i ):
_UpperCAmelCase = 1
_UpperCAmelCase = 0
for i in range(SCREAMING_SNAKE_CASE_ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 1 |
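# A readable, runnable restatement of the sieve above (assuming the intent is
# Project Euler #10: sum all primes below n); names are descriptive stand-ins
# for the obfuscated ones.
def sum_of_primes_below(n: int) -> int:
    is_composite = [False] * (n + 1)
    is_composite[0] = is_composite[1] = True
    for i in range(2, int(n**0.5) + 1):
        if not is_composite[i]:
            for j in range(i * i, n + 1, i):  # mark multiples of each prime
                is_composite[j] = True
    return sum(i for i in range(n) if not is_composite[i])
assert sum_of_primes_below(10) == 17  # 2 + 3 + 5 + 7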
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __UpperCamelCase ( A__ ):
__A : torch.FloatTensor
__A : torch.FloatTensor
class __UpperCamelCase ( A__ , A__ ):
__A : Dict = 1
@register_to_config
def __init__( self , _UpperCamelCase = 2000 , _UpperCamelCase = 0.15 , _UpperCamelCase = 0.01 , _UpperCamelCase = 1348.0 , _UpperCamelCase = 1e-5 , _UpperCamelCase = 1 , ):
# standard deviation of the initial noise distribution
_UpperCAmelCase = sigma_max
# setable values
_UpperCAmelCase = None
self.set_sigmas(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
return sample
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None ):
_UpperCAmelCase = sampling_eps if sampling_eps is not None else self.config.sampling_eps
_UpperCAmelCase = torch.linspace(1 , _UpperCamelCase , _UpperCamelCase , device=_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None ):
_UpperCAmelCase = sigma_min if sigma_min is not None else self.config.sigma_min
_UpperCAmelCase = sigma_max if sigma_max is not None else self.config.sigma_max
_UpperCAmelCase = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_UpperCAmelCase = torch.exp(torch.linspace(math.log(_UpperCamelCase ) , math.log(_UpperCamelCase ) , _UpperCamelCase ) )
_UpperCAmelCase = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ):
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = True , ):
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
_UpperCAmelCase = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
_UpperCAmelCase = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
_UpperCAmelCase = timesteps.to(self.discrete_sigmas.device )
_UpperCAmelCase = self.discrete_sigmas[timesteps].to(sample.device )
_UpperCAmelCase = self.get_adjacent_sigma(_UpperCamelCase , _UpperCamelCase ).to(sample.device )
_UpperCAmelCase = torch.zeros_like(_UpperCamelCase )
_UpperCAmelCase = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
_UpperCAmelCase = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
_UpperCAmelCase = diffusion.unsqueeze(-1 )
_UpperCAmelCase = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
_UpperCAmelCase = randn_tensor(
sample.shape , layout=sample.layout , generator=_UpperCamelCase , device=sample.device , dtype=sample.dtype )
_UpperCAmelCase = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_UpperCAmelCase = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=_UpperCamelCase , prev_sample_mean=_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = True , ):
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
_UpperCAmelCase = randn_tensor(sample.shape , layout=sample.layout , generator=_UpperCamelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
_UpperCAmelCase = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
_UpperCAmelCase = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
_UpperCAmelCase = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
_UpperCAmelCase = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
_UpperCAmelCase = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
_UpperCAmelCase = step_size.unsqueeze(-1 )
_UpperCAmelCase = sample + step_size * model_output
_UpperCAmelCase = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_UpperCAmelCase = timesteps.to(original_samples.device )
_UpperCAmelCase = self.discrete_sigmas.to(original_samples.device )[timesteps]
_UpperCAmelCase = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(_UpperCamelCase ) * sigmas[:, None, None, None]
)
_UpperCAmelCase = noise + original_samples
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps | 32 |
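# A small numeric sketch of the geometric sigma schedule set_sigmas() builds
# above: sigma(t) = sigma_min * (sigma_max / sigma_min) ** t evaluated on
# timesteps linspace(1, sampling_eps, N), so sigmas decay from sigma_max
# toward sigma_min. Constants below are the scheduler defaults.
_sigma_min, _sigma_max, _eps, _N = 0.01, 1348.0, 1e-5, 5
_t = torch.linspace(1, _eps, _N)
_sigmas = _sigma_min * (_sigma_max / _sigma_min) ** _t  # decreasing from 1348.0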
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class __UpperCamelCase ( A__ ):
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase ) | 32 | 1 |
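# Note: the warning category argument is obfuscated above; by the library's
# convention for deprecated feature extractors it is FutureWarning, e.g.:
# warnings.warn("... use GLPNImageProcessor instead.", FutureWarning)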
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
UpperCAmelCase_ = logging.get_logger(__name__)
class __UpperCamelCase ( A__ ):
__A : List[str] = """AutoTokenizer"""
__A : str = ["""tokenizer"""]
__A : Union[str, Any] = {
"""semantic_prompt""": 1,
"""coarse_prompt""": 2,
"""fine_prompt""": 2,
}
def __init__( self , _UpperCamelCase , _UpperCamelCase=None ):
super().__init__(_UpperCamelCase )
_UpperCAmelCase = speaker_embeddings
@classmethod
def UpperCamelCase( cls , _UpperCamelCase , _UpperCamelCase="speaker_embeddings_path.json" , **_UpperCamelCase ):
if speaker_embeddings_dict_path is not None:
_UpperCAmelCase = get_file_from_repo(
_UpperCamelCase , _UpperCamelCase , subfolder=kwargs.pop('''subfolder''' , _UpperCamelCase ) , cache_dir=kwargs.pop('''cache_dir''' , _UpperCamelCase ) , force_download=kwargs.pop('''force_download''' , _UpperCamelCase ) , proxies=kwargs.pop('''proxies''' , _UpperCamelCase ) , resume_download=kwargs.pop('''resume_download''' , _UpperCamelCase ) , local_files_only=kwargs.pop('''local_files_only''' , _UpperCamelCase ) , use_auth_token=kwargs.pop('''use_auth_token''' , _UpperCamelCase ) , revision=kwargs.pop('''revision''' , _UpperCamelCase ) , )
if speaker_embeddings_path is None:
logger.warning(
f'''`{os.path.join(_UpperCamelCase , _UpperCamelCase )}` does not exist, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the JSON dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
_UpperCAmelCase = None
else:
with open(_UpperCamelCase ) as speaker_embeddings_json:
_UpperCAmelCase = json.load(_UpperCamelCase )
else:
_UpperCAmelCase = None
_UpperCAmelCase = AutoTokenizer.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
return cls(tokenizer=_UpperCamelCase , speaker_embeddings=_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase="speaker_embeddings_path.json" , _UpperCamelCase="speaker_embeddings" , _UpperCamelCase = False , **_UpperCamelCase , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_UpperCamelCase , _UpperCamelCase , '''v2''' ) , exist_ok=_UpperCamelCase )
_UpperCAmelCase = {}
_UpperCAmelCase = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_UpperCAmelCase = self._load_voice_preset(_UpperCamelCase )
_UpperCAmelCase = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['''repo_or_path'''] , _UpperCamelCase , f'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=_UpperCamelCase , )
_UpperCAmelCase = os.path.join(_UpperCamelCase , f'''{prompt_key}_{key}.npy''' )
_UpperCAmelCase = tmp_dict
with open(os.path.join(_UpperCamelCase , _UpperCamelCase ) , '''w''' ) as fp:
json.dump(_UpperCamelCase , _UpperCamelCase )
super().save_pretrained(_UpperCamelCase , _UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase = None , **_UpperCamelCase ):
_UpperCAmelCase = self.speaker_embeddings[voice_preset]
_UpperCAmelCase = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
_UpperCAmelCase = get_file_from_repo(
self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , _UpperCamelCase ) , cache_dir=kwargs.pop('''cache_dir''' , _UpperCamelCase ) , force_download=kwargs.pop('''force_download''' , _UpperCamelCase ) , proxies=kwargs.pop('''proxies''' , _UpperCamelCase ) , resume_download=kwargs.pop('''resume_download''' , _UpperCamelCase ) , local_files_only=kwargs.pop('''local_files_only''' , _UpperCamelCase ) , use_auth_token=kwargs.pop('''use_auth_token''' , _UpperCamelCase ) , revision=kwargs.pop('''revision''' , _UpperCamelCase ) , )
if path is None:
raise ValueError(
f'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exist, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.''' )
_UpperCAmelCase = np.load(_UpperCamelCase )
return voice_preset_dict
def UpperCamelCase( self , _UpperCamelCase = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase="pt" , _UpperCamelCase=256 , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=False , **_UpperCamelCase , ):
if voice_preset is not None and not isinstance(_UpperCamelCase , _UpperCamelCase ):
if (
isinstance(_UpperCamelCase , _UpperCamelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_UpperCAmelCase = self._load_voice_preset(_UpperCamelCase )
else:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and not voice_preset.endswith('''.npz''' ):
_UpperCAmelCase = voice_preset + '''.npz'''
_UpperCAmelCase = np.load(_UpperCamelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(_UpperCamelCase , **_UpperCamelCase )
_UpperCAmelCase = BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
_UpperCAmelCase = self.tokenizer(
_UpperCamelCase , return_tensors=_UpperCamelCase , padding='''max_length''' , max_length=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , add_special_tokens=_UpperCamelCase , **_UpperCamelCase , )
if voice_preset is not None:
_UpperCAmelCase = voice_preset
return encoded_text | 32 |
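# A hypothetical voice-preset dict satisfying the dimensionality constraints
# the validation method above enforces (preset_shape: semantic 1-D,
# coarse/fine 2-D); array sizes are made up for illustration.
_voice_preset = {
    "semantic_prompt": np.zeros(16, dtype=np.int64),  # 1-D
    "coarse_prompt": np.zeros((2, 16), dtype=np.int64),  # 2-D
    "fine_prompt": np.zeros((8, 16), dtype=np.int64),  # 2-D
}
assert all(
    _voice_preset[k].ndim == n
    for k, n in {"semantic_prompt": 1, "coarse_prompt": 2, "fine_prompt": 2}.items()
)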
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase ( A__ ):
__A : Dict = ["""image_processor""", """tokenizer"""]
__A : List[str] = """BridgeTowerImageProcessor"""
__A : str = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self , _UpperCamelCase , _UpperCamelCase ):
super().__init__(_UpperCamelCase , _UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = self.tokenizer(
text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
# add pixel_values + pixel_mask
_UpperCAmelCase = self.image_processor(
_UpperCamelCase , return_tensors=_UpperCamelCase , do_normalize=_UpperCamelCase , do_center_crop=_UpperCamelCase , **_UpperCamelCase )
encoding.update(_UpperCamelCase )
return encoding
def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ):
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ):
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase )
@property
def UpperCamelCase( self ):
_UpperCAmelCase = self.tokenizer.model_input_names
_UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 32 | 1 |
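# The dict.fromkeys idiom in model_input_names above is an order-preserving,
# de-duplicating union of the two name lists:
_tok_names = ["input_ids", "attention_mask"]
_img_names = ["pixel_values", "attention_mask"]
assert list(dict.fromkeys(_tok_names + _img_names)) == ["input_ids", "attention_mask", "pixel_values"]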
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class __UpperCamelCase ( A__ ):
__A : Dict = """falcon"""
__A : Any = ["""past_key_values"""]
def __init__( self , _UpperCamelCase=65024 , _UpperCamelCase=4544 , _UpperCamelCase=32 , _UpperCamelCase=71 , _UpperCamelCase=1e-5 , _UpperCamelCase=0.02 , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=11 , _UpperCamelCase=11 , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCAmelCase = kwargs.pop('''n_embed''' , _UpperCamelCase )
_UpperCAmelCase = hidden_size if n_embed is None else n_embed
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = use_cache
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = num_attention_heads if num_kv_heads is None else num_kv_heads
_UpperCAmelCase = alibi
_UpperCAmelCase = new_decoder_architecture
_UpperCAmelCase = multi_query # Ignored when new_decoder_architecture is True
_UpperCAmelCase = parallel_attn
_UpperCAmelCase = bias
super().__init__(bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
@property
def UpperCamelCase( self ):
return self.hidden_size // self.num_attention_heads
@property
def UpperCamelCase( self ):
return not self.alibi | 32 |
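# A quick numeric check of the head_dim property above, using the __init__
# defaults (hidden_size=4544, num_attention_heads=71):
assert 4544 // 71 == 64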
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 32 | 1 |
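# A generic sketch of the lazy-import idea behind _LazyModule, using only the
# standard-library LazyLoader recipe (this mirrors the concept, not the
# transformers implementation): the module body runs on first attribute access.
import importlib.util
def _lazy_import(name: str):
    spec = importlib.util.find_spec(name)
    loader = importlib.util.LazyLoader(spec.loader)
    spec.loader = loader
    module = importlib.util.module_from_spec(spec)
    sys.modules[name] = module
    loader.exec_module(module)  # defers execution until first attribute access
    return module
# _json = _lazy_import("json")   # nothing heavy runs yet
# _json.dumps({"ok": 1})         # first access triggers the real import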
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
UpperCAmelCase_ = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
UpperCAmelCase_ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
UpperCAmelCase_ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def A__ ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ) -> tuple[str, float]:
"""simple docstring"""
_UpperCAmelCase = len([g for position, g in enumerate(SCREAMING_SNAKE_CASE_ ) if g == main_target[position]] )
return (item, float(SCREAMING_SNAKE_CASE_ ))
def A__ ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ) -> tuple[str, str]:
"""simple docstring"""
_UpperCAmelCase = random.randint(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 )
_UpperCAmelCase = parent_a[:random_slice] + parent_a[random_slice:]
_UpperCAmelCase = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def A__ ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : list[str] ) -> str:
"""simple docstring"""
_UpperCAmelCase = list(SCREAMING_SNAKE_CASE_ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
_UpperCAmelCase = random.choice(SCREAMING_SNAKE_CASE_ )
return "".join(SCREAMING_SNAKE_CASE_ )
def A__ ( SCREAMING_SNAKE_CASE_ : tuple[str, float] , SCREAMING_SNAKE_CASE_ : list[tuple[str, float]] , SCREAMING_SNAKE_CASE_ : list[str] , ) -> list[str]:
"""simple docstring"""
_UpperCAmelCase = []
# Generate more children proportionally to the fitness score.
_UpperCAmelCase = int(parent_a[1] * 1_00 ) + 1
_UpperCAmelCase = 10 if child_n >= 10 else child_n
for _ in range(SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = population_score[random.randint(0 , SCREAMING_SNAKE_CASE_ )][0]
_UpperCAmelCase , _UpperCAmelCase = crossover(parent_a[0] , SCREAMING_SNAKE_CASE_ )
# Append new string to the population list.
pop.append(mutate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
pop.append(mutate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
return pop
def A__ ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : list[str] , SCREAMING_SNAKE_CASE_ : bool = True ) -> tuple[int, int, str]:
"""simple docstring"""
if N_POPULATION < N_SELECTED:
_UpperCAmelCase = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(SCREAMING_SNAKE_CASE_ )
# Verify that the target contains no genes besides the ones inside genes variable.
_UpperCAmelCase = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_UpperCAmelCase = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(SCREAMING_SNAKE_CASE_ )
# Generate random starting population.
_UpperCAmelCase = []
for _ in range(SCREAMING_SNAKE_CASE_ ):
population.append(''''''.join([random.choice(SCREAMING_SNAKE_CASE_ ) for i in range(len(SCREAMING_SNAKE_CASE_ ) )] ) )
# Just some logs to know what the algorithms is doing.
_UpperCAmelCase , _UpperCAmelCase = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(SCREAMING_SNAKE_CASE_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_UpperCAmelCase = [evaluate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for item in population]
# Check if there is a matching evolution.
_UpperCAmelCase = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x[1] , reverse=SCREAMING_SNAKE_CASE_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
_UpperCAmelCase = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(SCREAMING_SNAKE_CASE_ )
# Normalize population score to be between 0 and 1.
_UpperCAmelCase = [
(item, score / len(SCREAMING_SNAKE_CASE_ )) for item, score in population_score
]
# This is selection
for i in range(SCREAMING_SNAKE_CASE_ ):
population.extend(select(population_score[int(SCREAMING_SNAKE_CASE_ )] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(SCREAMING_SNAKE_CASE_ ) > N_POPULATION:
break
if __name__ == "__main__":
UpperCAmelCase_ = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
UpperCAmelCase_ = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
) | 32 |
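# A standalone check of the fitness function used above: it counts
# position-wise character matches against the target string.
_score = len([g for pos, g in enumerate("bacara") if g == "banana"[pos]])
assert _score == 4  # matches at positions 0, 1, 3 and 5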
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __UpperCamelCase ( A__ ):
__A : Any = """biogpt"""
def __init__( self , _UpperCamelCase=42384 , _UpperCamelCase=1024 , _UpperCamelCase=24 , _UpperCamelCase=16 , _UpperCamelCase=4096 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=1024 , _UpperCamelCase=0.02 , _UpperCamelCase=1e-12 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=2 , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = scale_embedding
_UpperCAmelCase = use_cache
_UpperCAmelCase = layerdrop
_UpperCAmelCase = activation_dropout
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) | 32 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def A__ ( SCREAMING_SNAKE_CASE_ : List[Any] ) -> Any:
"""simple docstring"""
_UpperCAmelCase = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
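# Quick illustration of the helper just defined (shown as comments so the
# test module's import-time behavior is unchanged):
#   A__([torch.zeros(2, 3), torch.ones(2, 3)])  # -> True, shapes all match
#   A__([torch.zeros(2, 3), torch.ones(3, 2)])  # -> False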
class __UpperCamelCase ( A__ , A__ , A__ , unittest.TestCase ):
__A : str = StableDiffusionLatentUpscalePipeline
__A : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""height""",
"""width""",
"""cross_attention_kwargs""",
"""negative_prompt_embeds""",
"""prompt_embeds""",
}
__A : Dict = PipelineTesterMixin.required_optional_params - {"""num_images_per_prompt"""}
__A : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__A : Any = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__A : Optional[int] = frozenset([] )
__A : str = True
@property
def UpperCamelCase( self ):
_UpperCAmelCase = 1
_UpperCAmelCase = 4
_UpperCAmelCase = (16, 16)
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_UpperCamelCase )
return image
def UpperCamelCase( self ):
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=_UpperCamelCase , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'''KDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
) , in_channels=8 , mid_block_type=_UpperCamelCase , only_cross_attention=_UpperCamelCase , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
_UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
_UpperCAmelCase = EulerDiscreteScheduler(prediction_type='''sample''' )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''quick_gelu''' , projection_dim=512 , )
_UpperCAmelCase = CLIPTextModel(_UpperCamelCase )
_UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_UpperCAmelCase = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=0 ):
if str(_UpperCamelCase ).startswith('''mps''' ):
_UpperCAmelCase = torch.manual_seed(_UpperCamelCase )
else:
_UpperCAmelCase = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
_UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase( self ):
_UpperCAmelCase = '''cpu'''
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = self.get_dummy_inputs(_UpperCamelCase )
_UpperCAmelCase = pipe(**_UpperCamelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 256, 256, 3) )
_UpperCAmelCase = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
_UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCamelCase , 1e-3 )
def UpperCamelCase( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def UpperCamelCase( self ):
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def UpperCamelCase( self ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def UpperCamelCase( self ):
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def UpperCamelCase( self ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def UpperCamelCase( self ):
super().test_save_load_local(expected_max_difference=3e-3 )
def UpperCamelCase( self ):
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def UpperCamelCase( self ):
_UpperCAmelCase = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**_UpperCamelCase )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = self.get_dummy_inputs(_UpperCamelCase )
_UpperCAmelCase = 2
_UpperCAmelCase = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# schedulers in skip_schedulers are not supported by this pipeline, so skip them
continue
_UpperCAmelCase = getattr(_UpperCamelCase , scheduler_enum.name )
_UpperCAmelCase = scheduler_cls.from_config(pipe.scheduler.config )
_UpperCAmelCase = pipe(**_UpperCamelCase )[0]
outputs.append(_UpperCamelCase )
assert check_same_shape(_UpperCamelCase )
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase( self ):
_UpperCAmelCase = torch.manual_seed(33 )
_UpperCAmelCase = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
_UpperCAmelCase = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
_UpperCAmelCase = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
_UpperCAmelCase = pipe(_UpperCamelCase , generator=_UpperCamelCase , output_type='''latent''' ).images
_UpperCAmelCase = upscaler(
prompt=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=20 , guidance_scale=0 , generator=_UpperCamelCase , output_type='''np''' , ).images[0]
_UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
assert np.abs((expected_image - image).mean() ) < 5e-2
def UpperCamelCase( self ):
_UpperCAmelCase = torch.manual_seed(33 )
_UpperCAmelCase = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
_UpperCAmelCase = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
_UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
_UpperCAmelCase = upscaler(
prompt=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=20 , guidance_scale=0 , generator=_UpperCamelCase , output_type='''np''' , ).images[0]
_UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
assert np.abs((expected_image - image).max() ) < 5e-2 | 32 |
from typing import List
from .keymap import KEYMAP, get_character
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
"""simple docstring"""
def decorator(SCREAMING_SNAKE_CASE_ : List[Any] ):
_UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] )
handle += [key]
setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ )
return func
return decorator
def A__ ( *SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
"""simple docstring"""
def decorator(SCREAMING_SNAKE_CASE_ : Any ):
_UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] )
handle += keys
setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ )
return func
return decorator
class __UpperCamelCase ( A__ ):
def __new__( cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = super().__new__(cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if not hasattr(_UpperCamelCase , '''key_handler''' ):
setattr(_UpperCamelCase , '''key_handler''' , {} )
setattr(_UpperCamelCase , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
_UpperCAmelCase = getattr(_UpperCamelCase , '''handle_key''' , [] )
for key in handled_keys:
_UpperCAmelCase = value
return new_cls
@staticmethod
def UpperCamelCase( cls ):
_UpperCAmelCase = get_character()
if char != KEYMAP["undefined"]:
_UpperCAmelCase = ord(_UpperCamelCase )
_UpperCAmelCase = cls.key_handler.get(_UpperCamelCase )
if handler:
_UpperCAmelCase = char
return handler(cls )
else:
return None
def A__ ( cls : Union[str, Any] ) -> Any:
"""simple docstring"""
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() ) | 32 | 1 |
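# A self-contained illustration of what the two decorators above do: they
# accumulate key codes on a function's `handle_key` attribute, which the
# metaclass then collects into the class-level dispatch table.
def _on_keys(*keys):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += list(keys)
        setattr(func, "handle_key", handle)
        return func
    return decorator
@_on_keys("j", "k")
def _move():
    pass
assert _move.handle_key == ["j", "k"]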
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class __UpperCamelCase :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=64 , _UpperCamelCase=32 , _UpperCamelCase=5 , _UpperCamelCase=4 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = embedding_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
def UpperCamelCase( self ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = MobileBertModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = MobileBertForMaskedLM(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = MobileBertForNextSentencePrediction(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = MobileBertForPreTraining(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase , next_sentence_label=_UpperCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = MobileBertForQuestionAnswering(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = MobileBertForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = MobileBertForTokenClassification(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = MobileBertForMultipleChoice(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase( self ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( A__ , A__ , unittest.TestCase ):
__A : int = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
__A : Union[str, Any] = (
{
"""feature-extraction""": MobileBertModel,
"""fill-mask""": MobileBertForMaskedLM,
"""question-answering""": MobileBertForQuestionAnswering,
"""text-classification""": MobileBertForSequenceClassification,
"""token-classification""": MobileBertForTokenClassification,
"""zero-shot""": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__A : Optional[Any] = True
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ):
_UpperCAmelCase = super()._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
if return_labels:
if model_class in get_values(_UpperCamelCase ):
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCamelCase )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCamelCase )
return inputs_dict
def UpperCamelCase( self ):
_UpperCAmelCase = MobileBertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 )
def UpperCamelCase( self ):
self.config_tester.run_common_tests()
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_UpperCamelCase )
def A__ ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return torch.tensor(
SCREAMING_SNAKE_CASE_ , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ , )
UpperCAmelCase_ = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(_UpperCamelCase )
_UpperCAmelCase = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
_UpperCAmelCase = model(_UpperCamelCase )[0]
_UpperCAmelCase = torch.Size((1, 9, 512) )
self.assertEqual(output.shape , _UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[
[
[-2.4736526e07, 8.2691656e04, 1.6521838e05],
[-5.7541704e-01, 3.9056022e00, 4.4011507e00],
[2.6047359e00, 1.5677652e00, -1.7324188e-01],
]
] , device=_UpperCamelCase , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% relative difference at a value of 10e8
        # produces an absolute difference of ~1, so it is not a good idea to compare with an additive tolerance.
        # Here, we instead divide the expected result by the actual result to obtain a ratio of ~1. We then check
        # that the ratio is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
_UpperCAmelCase = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
_UpperCAmelCase = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound ) | 32 |
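# Worked instance of the ratio check above (illustrative numbers, not taken from the test):
# with TOLERANCE = 1e-3 and an expected value of 1.0e8, an output is accepted whenever
# 1 - 1e-3 < 1.0e8 / output < 1 + 1e-3, i.e. roughly 0.999e8 < output < 1.001e8.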
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=24 , _UpperCamelCase=2 , _UpperCamelCase=6 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=None , _UpperCamelCase=1000 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
def UpperCamelCase( self ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase( self ):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = LiltModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LiltForTokenClassification(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = LiltForQuestionAnswering(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase( self ):
_UpperCAmelCase = self.prepare_config_and_inputs()
        (
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
        ) = config_and_inputs
_UpperCAmelCase = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( A__ , A__ , A__ , unittest.TestCase ):
__A : Dict = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__A : Optional[Any] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__A : List[Any] = False
__A : Optional[int] = False
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return True
def UpperCamelCase( self ):
_UpperCAmelCase = LiltModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 )
def UpperCamelCase( self ):
self.config_tester.run_common_tests()
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase )
@slow
def UpperCamelCase( self ):
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LiltModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@require_torch
@slow
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
_UpperCAmelCase = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(_UpperCamelCase )
_UpperCAmelCase = torch.tensor([[1, 2]] , device=_UpperCamelCase )
_UpperCAmelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(input_ids=_UpperCamelCase , bbox=_UpperCamelCase )
_UpperCAmelCase = torch.Size([1, 2, 768] )
_UpperCAmelCase = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=_UpperCamelCase , )
        self.assertEqual(outputs.last_hidden_state.shape , _UpperCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _UpperCamelCase , atol=1e-3 ) ) | 32 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __UpperCamelCase ( A__ ):
__A : Tuple = """levit"""
def __init__( self , _UpperCamelCase=224 , _UpperCamelCase=3 , _UpperCamelCase=3 , _UpperCamelCase=2 , _UpperCamelCase=1 , _UpperCamelCase=16 , _UpperCamelCase=[128, 256, 384] , _UpperCamelCase=[4, 8, 12] , _UpperCamelCase=[4, 4, 4] , _UpperCamelCase=[16, 16, 16] , _UpperCamelCase=0 , _UpperCamelCase=[2, 2, 2] , _UpperCamelCase=[2, 2, 2] , _UpperCamelCase=0.02 , **_UpperCamelCase , ):
super().__init__(**_UpperCamelCase )
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = kernel_size
_UpperCAmelCase = stride
_UpperCAmelCase = padding
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = depths
_UpperCAmelCase = key_dim
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = patch_size
_UpperCAmelCase = attention_ratio
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = initializer_range
_UpperCAmelCase = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class __UpperCamelCase ( A__ ):
__A : List[Any] = version.parse("""1.11""" )
@property
def UpperCamelCase( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def UpperCamelCase( self ):
return 1e-4 | 32 |
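# Worked check of the "Subsample" entries built at the end of the config above
# (defaults assumed): with key_dim[0] = 16 and hidden_sizes = [128, 256, 384],
# the hidden_sizes[i] // key_dim[0] terms evaluate to 128 // 16 = 8 and
# 256 // 16 = 16 for the two down-sampling stages.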
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __UpperCamelCase ( A__ ):
__A : Tuple = """rwkv"""
__A : Any = {"""max_position_embeddings""": """context_length"""}
def __init__( self , _UpperCamelCase=50277 , _UpperCamelCase=1024 , _UpperCamelCase=4096 , _UpperCamelCase=32 , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=1e-5 , _UpperCamelCase=0 , _UpperCamelCase=0 , _UpperCamelCase=6 , _UpperCamelCase=False , _UpperCamelCase=True , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = context_length
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = attention_hidden_size if attention_hidden_size is not None else hidden_size
_UpperCAmelCase = intermediate_size if intermediate_size is not None else 4 * hidden_size
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = rescale_every
_UpperCAmelCase = use_cache
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = eos_token_id
super().__init__(
tie_word_embeddings=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) | 32 | 1 |
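# Worked check of the fall-back sizes in the config above (defaults assumed):
# with hidden_size = 4096 and both optional sizes left as None, the attention
# hidden size resolves to 4096 and the intermediate size to 4 * 4096 = 16384.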
from math import isqrt, loga
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> list[int]:
"""simple docstring"""
_UpperCAmelCase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = False
return [i for i in range(2 , SCREAMING_SNAKE_CASE_ ) if is_prime[i]]
def A__ ( SCREAMING_SNAKE_CASE_ : int = 80_08_00 , SCREAMING_SNAKE_CASE_ : int = 80_08_00 ) -> int:
"""simple docstring"""
_UpperCAmelCase = degree * loga(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = int(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = calculate_prime_numbers(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = len(SCREAMING_SNAKE_CASE_ ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 |
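# Worked instance of the counting condition above: for primes p = 2 and q = 3 the
# hybrid integer is p**q * q**p = 8 * 9 = 72, and it is counted because
# q * log2(p) + p * log2(q) = 3 + 2 * log2(3) ~= 6.17 stays below the upper bound.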
def A__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> str:
"""simple docstring"""
if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be non-negative''' )
_UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase = max(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 1 |
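# Usage sketch for the helper above (its obfuscated name here is A__): it mirrors
# Python's built-in & on non-negative integers, zero-padding both operands first.
#     A__(25, 32) == "0b000000"    # 0b011001 & 0b100000 == 0
#     A__(37, 50) == "0b100000"    # 0b100101 & 0b110010 == 0b100000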
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
_UpperCAmelCase = tempfile.mkdtemp()
# fmt: off
_UpperCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_UpperCAmelCase = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
_UpperCAmelCase = os.path.join(self.tmpdirname , _UpperCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase( self , **_UpperCamelCase ):
return BertTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def UpperCamelCase( self , **_UpperCamelCase ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def UpperCamelCase( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase( self ):
_UpperCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_UpperCAmelCase = [Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase( self ):
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_UpperCAmelCase = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0 )
_UpperCAmelCase = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = image_processor(_UpperCamelCase , return_tensors='''np''' )
_UpperCAmelCase = processor(images=_UpperCamelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase( self ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
_UpperCAmelCase = '''lower newer'''
_UpperCAmelCase = processor(text=_UpperCamelCase )
_UpperCAmelCase = tokenizer(_UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase( self ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
_UpperCAmelCase = '''lower newer'''
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=_UpperCamelCase , images=_UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(_UpperCamelCase ):
processor()
def UpperCamelCase( self ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
_UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase = processor.batch_decode(_UpperCamelCase )
_UpperCAmelCase = tokenizer.batch_decode(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
_UpperCAmelCase = '''lower newer'''
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=_UpperCamelCase , images=_UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 32 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class __UpperCamelCase ( A__ ):
__A : Dict = """falcon"""
__A : Any = ["""past_key_values"""]
def __init__( self , _UpperCamelCase=65024 , _UpperCamelCase=4544 , _UpperCamelCase=32 , _UpperCamelCase=71 , _UpperCamelCase=1e-5 , _UpperCamelCase=0.02 , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=11 , _UpperCamelCase=11 , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCAmelCase = kwargs.pop('''n_embed''' , _UpperCamelCase )
_UpperCAmelCase = hidden_size if n_embed is None else n_embed
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = use_cache
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = num_attention_heads if num_kv_heads is None else num_kv_heads
_UpperCAmelCase = alibi
_UpperCAmelCase = new_decoder_architecture
_UpperCAmelCase = multi_query # Ignored when new_decoder_architecture is True
_UpperCAmelCase = parallel_attn
_UpperCAmelCase = bias
super().__init__(bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
@property
def UpperCamelCase( self ):
return self.hidden_size // self.num_attention_heads
@property
def UpperCamelCase( self ):
return not self.alibi | 32 | 1 |
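# Worked check of the two properties above (defaults assumed): with
# hidden_size = 4544 and num_attention_heads = 71, head_dim = 4544 // 71 = 64,
# and rotary position embeddings are reported as enabled exactly when alibi is False.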
def A__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> str:
"""simple docstring"""
if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be non-negative''' )
_UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase = max(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 |
from math import sqrt
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A__ ( SCREAMING_SNAKE_CASE_ : int = 1_00_01 ) -> int:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = 1
while count != nth and number < 3:
number += 1
if is_prime(SCREAMING_SNAKE_CASE_ ):
count += 1
while count != nth:
number += 2
if is_prime(SCREAMING_SNAKE_CASE_ ):
count += 1
return number
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 1 |
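# Worked trace of the solver above for nth = 6: the first loop counts 2 and 3,
# the second loop steps through odd candidates 5, 7, 9 (composite), 11, 13 and
# stops at the sixth prime, returning 13.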
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __UpperCamelCase :
@staticmethod
def UpperCamelCase( *_UpperCamelCase , **_UpperCamelCase ):
pass
@is_pipeline_test
@require_vision
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
__A : Union[str, Any] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
_UpperCAmelCase = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = object_detector(examples[0] , threshold=0.0 )
_UpperCAmelCase = len(_UpperCamelCase )
self.assertGreater(_UpperCamelCase , 0 )
self.assertEqual(
_UpperCamelCase , [
{
'''score''': ANY(_UpperCamelCase ),
'''label''': ANY(_UpperCamelCase ),
'''box''': {'''xmin''': ANY(_UpperCamelCase ), '''ymin''': ANY(_UpperCamelCase ), '''xmax''': ANY(_UpperCamelCase ), '''ymax''': ANY(_UpperCamelCase )},
}
for i in range(_UpperCamelCase )
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def UpperCamelCase( self ):
pass
@require_torch
def UpperCamelCase( self ):
_UpperCAmelCase = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
_UpperCAmelCase = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
] , )
_UpperCAmelCase = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
[
{'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
]
] , )
@require_torch
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = pipeline('''zero-shot-object-detection''' )
_UpperCAmelCase = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
] , )
_UpperCAmelCase = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def UpperCamelCase( self ):
pass
@require_torch
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = 0.2
_UpperCAmelCase = pipeline('''zero-shot-object-detection''' )
_UpperCAmelCase = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=_UpperCamelCase , )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
] , )
@require_torch
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = 2
_UpperCAmelCase = pipeline('''zero-shot-object-detection''' )
_UpperCAmelCase = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=_UpperCamelCase , )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
] , ) | 32 |
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = F'''Input value of [number={number}] must be an integer'''
raise TypeError(SCREAMING_SNAKE_CASE_ )
if number < 0:
return False
_UpperCAmelCase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 1 |
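# Worked examples for the digit-by-digit check above (an automorphic number ends
# its own square): 5*5 = 25 and 76*76 = 5776 both return True, while 7*7 = 49
# returns False because the trailing digits 7 and 9 differ.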
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ = {
"configuration_autoformer": [
"AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AutoformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"AutoformerForPrediction",
"AutoformerModel",
"AutoformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 32 |
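# Behaviour sketch for the lazy module above (standard transformers pattern; the
# import path is assumed from the upstream layout, not verified here): importing
# the package stays cheap, and torch-backed classes such as AutoformerModel are
# only materialised on first attribute access.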
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class __UpperCamelCase ( A__ ):
__A : str = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
__A : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
__A : ClassVar[Features] = Features({} )
__A : str = "text"
@property
def UpperCamelCase( self ):
return {self.text_column: "text"} | 32 | 1 |
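# Behaviour sketch for the task template above: with the default text column the
# column_mapping property resolves to {"text": "text"}; constructing the template
# with text_column="content" (a hypothetical column name) would yield
# {"content": "text"} instead.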
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 32 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"vocab_file": "spiece.model"}
UpperCAmelCase_ = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
UpperCAmelCase_ = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
UpperCAmelCase_ = "▁"
class __UpperCamelCase ( A__ ):
__A : Any = VOCAB_FILES_NAMES
__A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Tuple = ["""input_ids""", """attention_mask"""]
def __init__( self , _UpperCamelCase , _UpperCamelCase="</s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase=100 , _UpperCamelCase=None , _UpperCamelCase = None , _UpperCamelCase=True , **_UpperCamelCase , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_UpperCAmelCase = [f'''<extra_id_{i}>''' for i in range(_UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_UpperCAmelCase = len(set(filter(lambda _UpperCamelCase : bool('''extra_id''' in str(_UpperCamelCase ) ) , _UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
if legacy:
logger.warning_once(
f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
_UpperCAmelCase = legacy
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , extra_ids=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=_UpperCamelCase , **_UpperCamelCase , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = extra_ids
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCamelCase )
@staticmethod
def UpperCamelCase( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_UpperCAmelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _UpperCamelCase , )
return max_model_length
@property
def UpperCamelCase( self ):
return self.sp_model.get_piece_size() + self._extra_ids
def UpperCamelCase( self ):
_UpperCAmelCase = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(_UpperCamelCase )) + [1]
return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
def UpperCamelCase( self ):
return list(
set(filter(lambda _UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , _UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) )
def UpperCamelCase( self ):
return [self._convert_token_to_id(_UpperCamelCase ) for token in self.get_sentinel_tokens()]
def UpperCamelCase( self , _UpperCamelCase ):
if len(_UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase )
if token_ids_a is None:
return token_ids_a
else:
_UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase )
return token_ids_a + token_ids_a
def __getstate__( self ):
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
return state
def __setstate__( self , _UpperCamelCase ):
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
_UpperCAmelCase = SPIECE_UNDERLINE + text.replace(_UpperCamelCase , ''' ''' )
return super().tokenize(_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ):
if not self.legacy:
_UpperCAmelCase = text.startswith(_UpperCamelCase )
if is_first:
_UpperCAmelCase = text[1:]
_UpperCAmelCase = self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(_UpperCamelCase ):
_UpperCAmelCase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def UpperCamelCase( self , _UpperCamelCase ):
if token.startswith('''<extra_id_''' ):
_UpperCAmelCase = re.match(R'''<extra_id_(\d+)>''' , _UpperCamelCase )
_UpperCAmelCase = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase ):
if index < self.sp_model.get_piece_size():
_UpperCAmelCase = self.sp_model.IdToPiece(_UpperCamelCase )
else:
_UpperCAmelCase = f'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = []
_UpperCAmelCase = ''''''
_UpperCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCamelCase ) + token
_UpperCAmelCase = True
_UpperCAmelCase = []
else:
current_sub_tokens.append(_UpperCamelCase )
_UpperCAmelCase = False
out_string += self.sp_model.decode(_UpperCamelCase )
return out_string.strip()
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
if not os.path.isdir(_UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase = os.path.join(
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , '''wb''' ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,) | 32 | 1 |
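# Worked check of the sentinel-token mapping above: "<extra_id_0>" converts to
# id vocab_size - 1, "<extra_id_1>" to vocab_size - 2, and so on, so the extra
# ids occupy the top of the vocabulary in reverse order; the id-to-token path
# inverts this via the f-string branch for ids beyond the sentencepiece range.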
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
UpperCAmelCase_ = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
UpperCAmelCase_ = {
"facebook/blenderbot_small-90M": 5_12,
}
class __UpperCamelCase ( A__ ):
__A : List[str] = VOCAB_FILES_NAMES
__A : Tuple = PRETRAINED_VOCAB_FILES_MAP
__A : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Optional[int] = BlenderbotSmallTokenizer
def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase="<|endoftext|>" , _UpperCamelCase="<|endoftext|>" , _UpperCamelCase="<|endoftext|>" , _UpperCamelCase=False , _UpperCamelCase=True , **_UpperCamelCase , ):
super().__init__(
ByteLevelBPETokenizer(
vocab=_UpperCamelCase , merges=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase , ) , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , **_UpperCamelCase , )
_UpperCAmelCase = add_prefix_space
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=None ):
_UpperCAmelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 32 |
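# Behaviour sketch for build_inputs_with_special_tokens above: a single sequence
# becomes [bos] + tokens_a + [eos], and a pair becomes
# [bos] + tokens_a + [eos] + [eos] + tokens_b + [eos].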
from __future__ import annotations
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
_UpperCAmelCase = str(SCREAMING_SNAKE_CASE_ )
return len(SCREAMING_SNAKE_CASE_ ) == 9 and set(SCREAMING_SNAKE_CASE_ ) == set('''123456789''' )
def A__ ( ) -> int | None:
"""simple docstring"""
for base_num in range(99_99 , 49_99 , -1 ):
_UpperCAmelCase = 10_00_02 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
for base_num in range(3_33 , 99 , -1 ):
_UpperCAmelCase = 1_00_20_03 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
return None
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 1 |
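# Worked instance of the search above: for base_num = 9327 the candidate is
# 100002 * 9327 = 932718654, i.e. 9327 concatenated with 2 * 9327 = 18654, which
# uses each of the digits 1-9 exactly once.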
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 32 |
import numpy as np
def A__ ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : float ) -> np.ndarray:
"""simple docstring"""
return np.where(vector > 0 , SCREAMING_SNAKE_CASE_ , (alpha * (np.exp(SCREAMING_SNAKE_CASE_ ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 1 |
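# Worked check of the ELU above: positives pass through unchanged and negatives
# are squashed towards -alpha, e.g. with alpha = 1.0 the input [1.0, 0.0, -1.0]
# maps to [1.0, 0.0, exp(-1) - 1] ~= [1.0, 0.0, -0.632].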
import os
import time
import numpy as np
import onnxruntime as ort
UpperCAmelCase_ = "1"
UpperCAmelCase_ = "0"
UpperCAmelCase_ = "1"
UpperCAmelCase_ = ort.SessionOptions()
UpperCAmelCase_ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
UpperCAmelCase_ = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
UpperCAmelCase_ = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
UpperCAmelCase_ = ort.RunOptions()
UpperCAmelCase_ = 1_28
UpperCAmelCase_ = 1
UpperCAmelCase_ = np.ones((batch, sequence), dtype=np.intaa)
UpperCAmelCase_ = np.ones((batch, sequence), dtype=np.intaa)
UpperCAmelCase_ = np.ones((batch, sequence), dtype=np.intaa)
print("Warm up phase...")
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("Start inference...")
UpperCAmelCase_ = time.time()
UpperCAmelCase_ = 20_00
UpperCAmelCase_ = {}
for iter in range(max_iters):
UpperCAmelCase_ = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 10_00 / max_iters)) | 32 |
# fmt: off
UpperCAmelCase_ = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
UpperCAmelCase_ = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt( message : str ) -> str:
    """Translate plain text into Morse code, one space between letters."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def decrypt( message : str ) -> str:
    """Translate Morse code back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split() )
def main() -> None:
    """Encrypt a sample message, then decrypt it again."""
    message = '''Morse code here!'''
    print(message )
    message = encrypt(message )
    print(message )
    message = decrypt(message )
    print(message )
if __name__ == "__main__":
main() | 32 | 1 |
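    # Round-trip sketch (added): upper-case text survives encryption followed
    # by decryption unchanged.
    assert decrypt(encrypt("SOS")) == "SOS"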
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray( rgb : np.ndarray ) -> np.ndarray:
    """Convert an RGB image to grayscale using the ITU-R 601 luma weights."""
    r , g , b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2_9_8_9 * r + 0.5_8_7_0 * g + 0.1_1_4_0 * b
def gray_to_binary( gray : np.ndarray ) -> np.ndarray:
    """Threshold a grayscale image into a binary mask."""
    return (gray > 1_27) & (gray <= 2_55)
def dilation( image : np.ndarray , kernel : np.ndarray ) -> np.ndarray:
    """Morphological dilation of a binary image with the given structuring element."""
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
if __name__ == "__main__":
# read original image
UpperCAmelCase_ = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
UpperCAmelCase_ = np.array(Image.open(lena_path))
# kernel to be applied
UpperCAmelCase_ = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
UpperCAmelCase_ = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
UpperCAmelCase_ = Image.fromarray(output).convert("RGB")
pil_img.save("result_dilation.png") | 32 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( A__ , unittest.TestCase ):
__A : Any = DanceDiffusionPipeline
__A : Any = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__A : Tuple = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
__A : Tuple = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__A : List[str] = False
__A : str = False
def UpperCamelCase( self ):
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_UpperCamelCase , use_timestep_embedding=_UpperCamelCase , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
_UpperCAmelCase = IPNDMScheduler()
_UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=0 ):
if str(_UpperCamelCase ).startswith('''mps''' ):
_UpperCAmelCase = torch.manual_seed(_UpperCamelCase )
else:
_UpperCAmelCase = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
_UpperCAmelCase = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 4,
}
return inputs
def UpperCamelCase( self ):
_UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = DanceDiffusionPipeline(**_UpperCamelCase )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = self.get_dummy_inputs(_UpperCamelCase )
_UpperCAmelCase = pipe(**_UpperCamelCase )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_UpperCAmelCase = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCamelCase( self ):
return super().test_save_load_local()
@skip_mps
def UpperCamelCase( self ):
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def UpperCamelCase( self ):
return super().test_save_load_optional_components()
@skip_mps
def UpperCamelCase( self ):
return super().test_attention_slicing_forward_pass()
def UpperCamelCase( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase( self ):
_UpperCAmelCase = torch_device
_UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCAmelCase = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase( self ):
_UpperCAmelCase = torch_device
        _UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.float16 )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCAmelCase = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 | 32 | 1 |
from math import isclose, sqrt
def next_point( point_x : float , point_y : float , incoming_gradient : float ) -> tuple[float, float, float]:
    """Reflect the beam at (point_x, point_y) and return the next contact point."""
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 1_00
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus , point_x ) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution( first_x_coord : float = 1.4 , first_y_coord : float = -9.6 ) -> int:
    """Count the laser beam's internal reflections (Project Euler 144)."""
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    incoming_gradient = (1_0.1 - point_y) / (0.0 - point_x)
    while not (-0.0_1 <= point_x <= 0.0_1 and point_y > 0):
        point_x , point_y , incoming_gradient = next_point(point_x , point_y , incoming_gradient )
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 |
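    # Illustrative check (added): for the default beam the published Project
    # Euler 144 answer is 354 internal reflections.
    assert solution() == 354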
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModel)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Optional[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_FOR_MASKED_LM_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Any = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Optional[int] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : str = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
) | 32 | 1 |
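# Illustrative usage (added; assumes the classes above are exported under their
# real names, e.g. FlaxAutoModel): the auto class reads a checkpoint's config and
# instantiates the matching Flax architecture. "bert-base-cased" is an example
# hub id and is downloaded on first use.
# model = FlaxAutoModel.from_pretrained("bert-base-cased")
# type(model).__name__  # -> "FlaxBertModel"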
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class __UpperCamelCase ( A__ ):
__A : Tuple = """pix2struct_text_model"""
__A : Any = ["""past_key_values"""]
__A : str = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , _UpperCamelCase=50244 , _UpperCamelCase=768 , _UpperCamelCase=64 , _UpperCamelCase=2048 , _UpperCamelCase=12 , _UpperCamelCase=12 , _UpperCamelCase=32 , _UpperCamelCase=128 , _UpperCamelCase=0.1 , _UpperCamelCase=1e-6 , _UpperCamelCase=1.0 , _UpperCamelCase="gelu_new" , _UpperCamelCase=0 , _UpperCamelCase=False , _UpperCamelCase=0 , _UpperCamelCase=1 , _UpperCamelCase=False , _UpperCamelCase=True , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = d_kv
_UpperCAmelCase = d_ff
_UpperCAmelCase = num_layers
_UpperCAmelCase = num_heads
_UpperCAmelCase = relative_attention_num_buckets
_UpperCAmelCase = relative_attention_max_distance
_UpperCAmelCase = dropout_rate
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_factor
_UpperCAmelCase = use_cache
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = decoder_start_token_id
# for backwards compatibility
_UpperCAmelCase = dense_act_fn
super().__init__(
pad_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , decoder_start_token_id=_UpperCamelCase , tie_word_embeddings=_UpperCamelCase , is_decoder=_UpperCamelCase , **_UpperCamelCase , )
@classmethod
def UpperCamelCase( cls , _UpperCamelCase , **_UpperCamelCase ):
cls._set_token_in_kwargs(_UpperCamelCase )
_UpperCAmelCase , _UpperCAmelCase = cls.get_config_dict(_UpperCamelCase , **_UpperCamelCase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
_UpperCAmelCase = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_UpperCamelCase , **_UpperCamelCase )
class __UpperCamelCase ( A__ ):
__A : List[Any] = """pix2struct_vision_model"""
def __init__( self , _UpperCamelCase=768 , _UpperCamelCase=768 , _UpperCamelCase=2048 , _UpperCamelCase=64 , _UpperCamelCase=12 , _UpperCamelCase=12 , _UpperCamelCase="gelu_new" , _UpperCamelCase=1e-6 , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=1e-10 , _UpperCamelCase=1.0 , _UpperCamelCase=4096 , _UpperCamelCase=32 , _UpperCamelCase=128 , **_UpperCamelCase , ):
super().__init__(**_UpperCamelCase )
_UpperCAmelCase = hidden_size
_UpperCAmelCase = patch_embed_hidden_size
_UpperCAmelCase = d_ff
_UpperCAmelCase = dropout_rate
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = initializer_range
_UpperCAmelCase = initializer_factor
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = dense_act_fn
_UpperCAmelCase = seq_len
_UpperCAmelCase = relative_attention_num_buckets
_UpperCAmelCase = relative_attention_max_distance
_UpperCAmelCase = d_kv
@classmethod
def UpperCamelCase( cls , _UpperCamelCase , **_UpperCamelCase ):
cls._set_token_in_kwargs(_UpperCamelCase )
_UpperCAmelCase , _UpperCAmelCase = cls.get_config_dict(_UpperCamelCase , **_UpperCamelCase )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
_UpperCAmelCase = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_UpperCamelCase , **_UpperCamelCase )
class __UpperCamelCase ( A__ ):
__A : int = """pix2struct"""
__A : List[str] = True
def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=1.0 , _UpperCamelCase=0.02 , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=True , **_UpperCamelCase , ):
super().__init__(tie_word_embeddings=_UpperCamelCase , is_encoder_decoder=_UpperCamelCase , **_UpperCamelCase )
if text_config is None:
_UpperCAmelCase = {}
logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' )
if vision_config is None:
_UpperCAmelCase = {}
logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''' )
_UpperCAmelCase = PixaStructTextConfig(**_UpperCamelCase )
_UpperCAmelCase = PixaStructVisionConfig(**_UpperCamelCase )
_UpperCAmelCase = self.text_config.decoder_start_token_id
_UpperCAmelCase = self.text_config.pad_token_id
_UpperCAmelCase = self.text_config.eos_token_id
_UpperCAmelCase = initializer_factor
_UpperCAmelCase = initializer_range
_UpperCAmelCase = self.initializer_range
_UpperCAmelCase = self.initializer_range
_UpperCAmelCase = is_vqa
@classmethod
def UpperCamelCase( cls , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.text_config.to_dict()
_UpperCAmelCase = self.vision_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output | 32 |
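# Illustrative usage (added; names follow the in-file references, which assume the
# classes are PixaStructTextConfig / PixaStructVisionConfig / PixaStructConfig):
# text_cfg = PixaStructTextConfig()
# vision_cfg = PixaStructVisionConfig()
# full_cfg = PixaStructConfig.from_text_vision_configs(text_cfg, vision_cfg)
# full_cfg.model_type  # -> "pix2struct"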
import base64
def base85_encode( string : str ) -> bytes:
    """Encode a UTF-8 string with Base85."""
    return base64.b85encode(string.encode('''utf-8''' ) )
def base85_decode( encoded : bytes ) -> str:
    """Decode Base85 bytes back to a UTF-8 string."""
    return base64.b85decode(encoded ).decode('''utf-8''' )
if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=A__ ):
__A : Optional[Any] = ["""flax""", """transformers"""]
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''flax''', '''transformers'''] )
class __UpperCamelCase ( metaclass=A__ ):
__A : str = ["""flax""", """transformers"""]
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''flax''', '''transformers'''] )
class __UpperCamelCase ( metaclass=A__ ):
__A : Tuple = ["""flax""", """transformers"""]
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''flax''', '''transformers'''] )
class __UpperCamelCase ( metaclass=A__ ):
__A : Optional[Any] = ["""flax""", """transformers"""]
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''flax''', '''transformers'''] ) | 32 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase_ = logging.get_logger(__name__)
class __UpperCamelCase ( A__ ):
__A : int = ["""pixel_values"""]
def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1 / 255 , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ):
super().__init__(**_UpperCamelCase )
_UpperCAmelCase = size if size is not None else {'''height''': 224, '''width''': 224}
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_UpperCAmelCase = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase , param_name='''crop_size''' )
_UpperCAmelCase = do_resize
_UpperCAmelCase = do_rescale
_UpperCAmelCase = do_normalize
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BILINEAR , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if "shortest_edge" in size:
_UpperCAmelCase = get_resize_output_image_size(_UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=_UpperCamelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_UpperCAmelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ):
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ):
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(_UpperCamelCase , param_name='''crop_size''' , default_to_square=_UpperCamelCase )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if not is_batched(_UpperCamelCase ):
_UpperCAmelCase = [images]
if not valid_images(_UpperCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(_UpperCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
_UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase ) | 32 | 1 |
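# Illustrative usage (added; assumes the class above is exposed under a concrete
# name, here hypothetically `ImageProcessor`, with the standard __call__ API):
# processor = ImageProcessor()
# batch = processor(np.zeros((300, 400, 3), dtype=np.uint8), return_tensors="np")
# batch["pixel_values"].shape  # -> (1, 3, 224, 224) after resize + center crop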
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
UpperCAmelCase_ = False
UpperCAmelCase_ = True
UpperCAmelCase_ = False
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--repo_path",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
    config_parameters_to_change = {
"image_size": "sample_size",
"num_res_blocks": "layers_per_block",
"block_channels": "block_out_channels",
"down_blocks": "down_block_types",
"up_blocks": "up_block_types",
"downscale_freq_shift": "freq_shift",
"resnet_num_groups": "norm_num_groups",
"resnet_act_fn": "act_fn",
"resnet_eps": "norm_eps",
"num_head_channels": "attention_head_dim",
}
    key_parameters_to_change = {
"time_steps": "time_proj",
"mid": "mid_block",
"downsample_blocks": "down_blocks",
"upsample_blocks": "up_blocks",
}
UpperCAmelCase_ = "" if has_file(args.repo_path, "config.json") else "unet"
with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
UpperCAmelCase_ = reader.read()
UpperCAmelCase_ = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, "config.json"):
        model = UNetaDModel(**config)
    else:
        class_name = UNetaDConditionModel if "ldm-text2im-large-256" in args.repo_path else UNetaDModel
        model = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
    config = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
                config[value] = config[key]
                del config[key]
        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]
if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))
        new_state_dict = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
continue
            has_changed = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
if not has_changed:
                new_state_dict[param_key] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder)) | 32 |
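# Example invocation (illustrative; the script filename is assumed):
#   python change_naming_configs_and_checkpoints.py --repo_path ./my-unet-repo --dump_path ./out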
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=A__ ):
__A : str = ["""torch""", """scipy"""]
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''torch''', '''scipy'''] ) | 32 | 1 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=32 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=[10, 20, 30, 40] , _UpperCamelCase=[2, 2, 3, 2] , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=10 , _UpperCamelCase=0.02 , _UpperCamelCase=["stage2", "stage3", "stage4"] , _UpperCamelCase=[2, 3, 4] , _UpperCamelCase=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = num_stages
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = num_labels
_UpperCAmelCase = initializer_range
_UpperCAmelCase = out_features
_UpperCAmelCase = out_indices
_UpperCAmelCase = scope
def UpperCamelCase( self ):
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCamelCase( self ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = ConvNextVaModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = ConvNextVaForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = ConvNextVaBackbone(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_UpperCAmelCase = None
_UpperCAmelCase = ConvNextVaBackbone(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCamelCase( self ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
def UpperCamelCase( self ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'''pixel_values''': pixel_values, '''labels''': labels}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( A__ , A__ , unittest.TestCase ):
__A : Dict = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
__A : Union[str, Any] = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
__A : str = False
__A : Dict = False
__A : Any = False
__A : Tuple = False
__A : Optional[Any] = False
def UpperCamelCase( self ):
_UpperCAmelCase = ConvNextVaModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 )
def UpperCamelCase( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase( self ):
return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
def UpperCamelCase( self ):
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
def UpperCamelCase( self ):
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
def UpperCamelCase( self ):
pass
def UpperCamelCase( self ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_with_labels()
_UpperCAmelCase = True
if model_class.__name__ in [
*get_values(_UpperCamelCase ),
*get_values(_UpperCamelCase ),
]:
continue
_UpperCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.train()
_UpperCAmelCase = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
_UpperCAmelCase = model(**_UpperCamelCase ).loss
loss.backward()
def UpperCamelCase( self ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_with_labels()
_UpperCAmelCase = False
_UpperCAmelCase = True
if (
model_class.__name__
in [*get_values(_UpperCamelCase ), *get_values(_UpperCamelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
_UpperCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
_UpperCAmelCase = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
_UpperCAmelCase = model(**_UpperCamelCase ).loss
loss.backward()
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
def check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
_UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(_UpperCamelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
@slow
def UpperCamelCase( self ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = ConvNextVaModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def prepare_img():
    """Load the standard COCO sample image used by the integration tests."""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def UpperCamelCase( self ):
return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(_UpperCamelCase )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = preprocessor(images=_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**_UpperCamelCase )
# verify the logits
_UpperCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
_UpperCAmelCase = torch.tensor([0.9996, 0.1966, -0.4386] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) ) | 32 |
def solution( n : int = 2_00_00_00 ) -> int:
    """Sum of all primes below n, via a sieve of Eratosthenes (Project Euler 10)."""
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 1 |
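    # Illustrative check (added): primes strictly below 10 sum to 2 + 3 + 5 + 7 = 17.
    assert solution(10) == 17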
import random
def _partition( data : list , pivot ) -> tuple:
    """Three-way partition of data around pivot: (less, equal, greater)."""
    less , equal , greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element )
        elif element > pivot:
            greater.append(element )
        else:
            equal.append(element )
    return less, equal, greater
def quick_select( items : list , index : int ):
    """Return the index-th smallest element (0-based) of items, or None if out of range."""
    if index >= len(items ) or index < 0:
        return None
    pivot = items[random.randint(0 , len(items ) - 1 )]
    count = 0
    smaller , equal , larger = _partition(items , pivot )
    count = len(equal )
    m = len(smaller )
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller , index )
    # must be in larger
    else:
        return quick_select(larger , index - (m + count) )
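if __name__ == "__main__":
    # Illustrative usage (added): the 0-based k-th smallest element of a list.
    print(quick_select([2, 4, 5, 7, 899, 54, 32], 5))  # -> 54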
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class __UpperCamelCase ( A__ ):
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase ) | 32 | 1 |
def neville_interpolate( x_points : list , y_points : list , xa : int ) -> list:
    """Neville's iterated interpolation: evaluate the interpolating polynomial at xa."""
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 |
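    # Illustrative check (added): points on y = x**2 reproduce the parabola, so
    # evaluating the interpolant at the data point x = 5 returns 25.0.
    assert neville_interpolate([1, 2, 3, 4, 5], [1, 4, 9, 16, 25], 5)[0] == 25.0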
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase ( A__ ):
__A : Dict = ["""image_processor""", """tokenizer"""]
__A : List[str] = """BridgeTowerImageProcessor"""
__A : str = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self , _UpperCamelCase , _UpperCamelCase ):
super().__init__(_UpperCamelCase , _UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = self.tokenizer(
text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
# add pixel_values + pixel_mask
_UpperCAmelCase = self.image_processor(
_UpperCamelCase , return_tensors=_UpperCamelCase , do_normalize=_UpperCamelCase , do_center_crop=_UpperCamelCase , **_UpperCamelCase )
encoding.update(_UpperCamelCase )
return encoding
def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ):
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ):
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase )
@property
def UpperCamelCase( self ):
_UpperCAmelCase = self.tokenizer.model_input_names
_UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 32 | 1 |
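# Illustrative usage (added; assumes the class above is BridgeTowerProcessor and
# that a published checkpoint such as "BridgeTower/bridgetower-base" is available):
# processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
# encoding = processor(images=image, text="a photo of a cat", return_tensors="pt")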
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 32 |
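# Illustrative usage (added, not part of the original module): the lazy module
# defers the heavy framework imports until an attribute is first accessed, e.g.:
# from transformers import XLMTokenizer
# tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")  # example checkpoint id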
def A__ ( SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(SCREAMING_SNAKE_CASE_ ) )
def A__ ( SCREAMING_SNAKE_CASE_ : list[list[int]] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
if index == len(SCREAMING_SNAKE_CASE_ ):
return True
# Recursive Step
for i in range(SCREAMING_SNAKE_CASE_ ):
if valid_coloring(graph[index] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
# Color current vertex
_UpperCAmelCase = i
# Validate coloring
if util_color(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , index + 1 ):
return True
# Backtrack
_UpperCAmelCase = -1
return False
def A__ ( SCREAMING_SNAKE_CASE_ : list[list[int]] , SCREAMING_SNAKE_CASE_ : int ) -> list[int]:
"""simple docstring"""
_UpperCAmelCase = [-1] * len(SCREAMING_SNAKE_CASE_ )
if util_color(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 0 ):
return colored_vertices
return [] | 32 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __UpperCamelCase ( A__ ):
__A : Any = """biogpt"""
def __init__( self , _UpperCamelCase=42384 , _UpperCamelCase=1024 , _UpperCamelCase=24 , _UpperCamelCase=16 , _UpperCamelCase=4096 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=1024 , _UpperCamelCase=0.02 , _UpperCamelCase=1e-12 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=2 , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = scale_embedding
_UpperCAmelCase = use_cache
_UpperCAmelCase = layerdrop
_UpperCAmelCase = activation_dropout
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) | 32 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __UpperCamelCase :
def __init__( self , _UpperCamelCase , _UpperCamelCase=2 , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=10 , _UpperCamelCase=3 , _UpperCamelCase=32 * 4 , _UpperCamelCase=32 * 6 , _UpperCamelCase=4 , _UpperCamelCase=32 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = is_training
_UpperCAmelCase = use_auxiliary_loss
_UpperCAmelCase = num_queries
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_size
_UpperCAmelCase = max_size
_UpperCAmelCase = num_labels
_UpperCAmelCase = mask_feature_size
def UpperCamelCase( self ):
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_UpperCamelCase )
_UpperCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_UpperCamelCase )
_UpperCAmelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_UpperCamelCase ) > 0.5
).float()
_UpperCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=_UpperCamelCase ) > 0.5).long()
_UpperCAmelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCamelCase( self ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = output.encoder_hidden_states
_UpperCAmelCase = output.pixel_decoder_hidden_states
_UpperCAmelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_UpperCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCamelCase ) , config.decoder_config.decoder_layers )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ):
with torch.no_grad():
_UpperCAmelCase = MaskFormerModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(pixel_values=_UpperCamelCase , pixel_mask=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , output_hidden_states=_UpperCamelCase )
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = MaskFormerForInstanceSegmentation(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
def comm_check_on_output(_UpperCamelCase ):
# let's still check that all the required outputs are there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCAmelCase = model(pixel_values=_UpperCamelCase , pixel_mask=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase )
comm_check_on_output(_UpperCamelCase )
_UpperCAmelCase = model(
pixel_values=_UpperCamelCase , pixel_mask=_UpperCamelCase , mask_labels=_UpperCamelCase , class_labels=_UpperCamelCase )
comm_check_on_output(_UpperCamelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __UpperCamelCase ( A__ , A__ , unittest.TestCase ):
__A : int = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
__A : int = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
__A : List[str] = False
__A : Optional[Any] = False
__A : Union[str, Any] = False
__A : Optional[Any] = False
def UpperCamelCase( self ):
_UpperCAmelCase = MaskFormerModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase )
def UpperCamelCase( self ):
self.config_tester.run_common_tests()
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_UpperCamelCase , **_UpperCamelCase , output_hidden_states=_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_UpperCamelCase )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def UpperCamelCase( self ):
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def UpperCamelCase( self ):
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def UpperCamelCase( self ):
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def UpperCamelCase( self ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def UpperCamelCase( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCamelCase( self ):
pass
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
@slow
def UpperCamelCase( self ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
_UpperCAmelCase = MaskFormerModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = (self.model_tester.min_size,) * 2
_UpperCAmelCase = {
'''pixel_values''': torch.randn((2, 3, *size) , device=_UpperCamelCase ),
'''mask_labels''': torch.randn((2, 10, *size) , device=_UpperCamelCase ),
'''class_labels''': torch.zeros(2 , 10 , device=_UpperCamelCase ).long(),
}
_UpperCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_UpperCamelCase )
_UpperCAmelCase = model(**_UpperCamelCase )
self.assertTrue(outputs.loss is not None )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_UpperCamelCase , **_UpperCamelCase , output_hidden_states=_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase ).to(_UpperCamelCase )
_UpperCAmelCase = model(**_UpperCamelCase , output_attentions=_UpperCamelCase )
self.assertTrue(outputs.attentions is not None )
def UpperCamelCase( self ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_UpperCAmelCase = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.train()
_UpperCAmelCase = model(_UpperCamelCase , mask_labels=_UpperCamelCase , class_labels=_UpperCamelCase ).loss
loss.backward()
def UpperCamelCase( self ):
# only MaskFormerForInstanceSegmentation has the loss
_UpperCAmelCase = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.train()
_UpperCAmelCase = model(_UpperCamelCase , mask_labels=_UpperCamelCase , class_labels=_UpperCamelCase )
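# Retain gradients on intermediate activations so we can assert they are populated after backward().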
_UpperCAmelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_UpperCAmelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_UpperCamelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCAmelCase_ = 1e-4
def A__ ( ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def UpperCamelCase( self ):
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def UpperCamelCase( self ):
_UpperCAmelCase = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(_UpperCamelCase )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase )
_UpperCAmelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_UpperCAmelCase = model(**_UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(_UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
_UpperCAmelCase = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(_UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
_UpperCAmelCase = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(_UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
def UpperCamelCase( self ):
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_UpperCamelCase )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase )
_UpperCAmelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_UpperCAmelCase = model(**_UpperCamelCase )
# masks_queries_logits
_UpperCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_UpperCAmelCase = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
_UpperCAmelCase = torch.tensor(_UpperCamelCase ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
# class_queries_logits
_UpperCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase = torch.tensor(
[
[1.6512e00, -5.2572e00, -3.3519e00],
[3.6169e-02, -5.9025e00, -2.9313e00],
[1.0766e-04, -7.7630e00, -5.1263e00],
] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
def UpperCamelCase( self ):
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(_UpperCamelCase )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase )
_UpperCAmelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_UpperCAmelCase = model(**_UpperCamelCase )
# masks_queries_logits
_UpperCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_UpperCAmelCase = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
_UpperCAmelCase = torch.tensor(_UpperCamelCase ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
# class_queries_logits
_UpperCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
def UpperCamelCase( self ):
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_UpperCamelCase )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
_UpperCAmelCase = inputs['''pixel_values'''].to(_UpperCamelCase )
_UpperCAmelCase = [el.to(_UpperCamelCase ) for el in inputs['''mask_labels''']]
_UpperCAmelCase = [el.to(_UpperCamelCase ) for el in inputs['''class_labels''']]
with torch.no_grad():
_UpperCAmelCase = model(**_UpperCamelCase )
self.assertTrue(outputs.loss is not None ) | 32 |
from typing import List
from .keymap import KEYMAP, get_character
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
"""simple docstring"""
def decorator(SCREAMING_SNAKE_CASE_ : List[Any] ):
_UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] )
handle += [key]
setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ )
return func
return decorator
def A__ ( *SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
"""simple docstring"""
def decorator(SCREAMING_SNAKE_CASE_ : Any ):
_UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] )
handle += keys
setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ )
return func
return decorator
class __UpperCamelCase ( A__ ):
def __new__( cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = super().__new__(cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if not hasattr(_UpperCamelCase , '''key_handler''' ):
setattr(_UpperCamelCase , '''key_handler''' , {} )
setattr(_UpperCamelCase , '''handle_input''' , KeyHandler.handle_input )
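# Collect every method tagged by the key decorators above into the class-level key_handler dispatch table.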
for value in attrs.values():
_UpperCAmelCase = getattr(_UpperCamelCase , '''handle_key''' , [] )
for key in handled_keys:
_UpperCAmelCase = value
return new_cls
@staticmethod
def UpperCamelCase( cls ):
_UpperCAmelCase = get_character()
if char != KEYMAP["undefined"]:
_UpperCAmelCase = ord(_UpperCamelCase )
_UpperCAmelCase = cls.key_handler.get(_UpperCamelCase )
if handler:
_UpperCAmelCase = char
return handler(cls )
else:
return None
def A__ ( cls : Union[str, Any] ) -> Any:
"""simple docstring"""
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() ) | 32 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel | 32 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=24 , _UpperCamelCase=2 , _UpperCamelCase=6 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=None , _UpperCamelCase=1000 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
def UpperCamelCase( self ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
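# i.e. swap coordinates where needed so each box satisfies x0 <= x1 and y0 <= y1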
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase( self ):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = LiltModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LiltForTokenClassification(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = LiltForQuestionAnswering(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase( self ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( A__ , A__ , A__ , unittest.TestCase ):
__A : Dict = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__A : Optional[Any] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__A : List[Any] = False
__A : Optional[int] = False
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return True
def UpperCamelCase( self ):
_UpperCAmelCase = LiltModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 )
def UpperCamelCase( self ):
self.config_tester.run_common_tests()
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase )
@slow
def UpperCamelCase( self ):
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LiltModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@require_torch
@slow
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
_UpperCAmelCase = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(_UpperCamelCase )
_UpperCAmelCase = torch.tensor([[1, 2]] , device=_UpperCamelCase )
_UpperCAmelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(input_ids=_UpperCamelCase , bbox=_UpperCamelCase )
_UpperCAmelCase = torch.Size([1, 2, 768] )
_UpperCAmelCase = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=_UpperCamelCase , )
self.assertTrue(outputs.last_hidden_state.shape , _UpperCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _UpperCamelCase , atol=1e-3 ) ) | 32 | 1 |
from sklearn.metrics import mean_squared_error
import datasets
UpperCAmelCase_ = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
UpperCAmelCase_ = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
UpperCAmelCase_ = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def UpperCamelCase( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
def UpperCamelCase( self ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase="uniform_average" , _UpperCamelCase=True ):
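# Delegates to scikit-learn's mean_squared_error; passing squared=False returns the RMSE instead of the MSE.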
_UpperCAmelCase = mean_squared_error(
_UpperCamelCase , _UpperCamelCase , sample_weight=_UpperCamelCase , multioutput=_UpperCamelCase , squared=_UpperCamelCase )
return {"mse": mse} | 32 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __UpperCamelCase ( A__ ):
__A : Tuple = """rwkv"""
__A : Any = {"""max_position_embeddings""": """context_length"""}
def __init__( self , _UpperCamelCase=50277 , _UpperCamelCase=1024 , _UpperCamelCase=4096 , _UpperCamelCase=32 , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=1e-5 , _UpperCamelCase=0 , _UpperCamelCase=0 , _UpperCamelCase=6 , _UpperCamelCase=False , _UpperCamelCase=True , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = context_length
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
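# Default the attention and feed-forward sizes from hidden_size when they are not provided.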
_UpperCAmelCase = attention_hidden_size if attention_hidden_size is not None else hidden_size
_UpperCAmelCase = intermediate_size if intermediate_size is not None else 4 * hidden_size
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = rescale_every
_UpperCAmelCase = use_cache
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = eos_token_id
super().__init__(
tie_word_embeddings=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) | 32 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ = {
"configuration_table_transformer": [
"TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TableTransformerConfig",
"TableTransformerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TableTransformerForObjectDetection",
"TableTransformerModel",
"TableTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 32 |
def A__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> str:
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
_UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase = max(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''' )
_UpperCAmelCase = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # "J'aime le camembert !"
_UpperCAmelCase = model(_UpperCamelCase )['''last_hidden_state''']
_UpperCAmelCase = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , _UpperCamelCase )
# compare the actual values for a slice.
_UpperCAmelCase = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) ) | 32 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class __UpperCamelCase ( A__ ):
__A : Dict = """falcon"""
__A : Any = ["""past_key_values"""]
def __init__( self , _UpperCamelCase=65024 , _UpperCamelCase=4544 , _UpperCamelCase=32 , _UpperCamelCase=71 , _UpperCamelCase=1e-5 , _UpperCamelCase=0.02 , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=11 , _UpperCamelCase=11 , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCAmelCase = kwargs.pop('''n_embed''' , _UpperCamelCase )
_UpperCAmelCase = hidden_size if n_embed is None else n_embed
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = use_cache
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = num_attention_heads if num_kv_heads is None else num_kv_heads
_UpperCAmelCase = alibi
_UpperCAmelCase = new_decoder_architecture
_UpperCAmelCase = multi_query # Ignored when new_decoder_architecture is True
_UpperCAmelCase = parallel_attn
_UpperCAmelCase = bias
super().__init__(bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
@property
def UpperCamelCase( self ):
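# Size of each attention head.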
return self.hidden_size // self.num_attention_heads
@property
def UpperCamelCase( self ):
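# Rotary position embeddings are used whenever ALiBi is disabled (hence the negation).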
return not self.alibi | 32 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def A__ ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=SCREAMING_SNAKE_CASE_ )
env_command_parser(subparsers=SCREAMING_SNAKE_CASE_ )
launch_command_parser(subparsers=SCREAMING_SNAKE_CASE_ )
tpu_command_parser(subparsers=SCREAMING_SNAKE_CASE_ )
test_command_parser(subparsers=SCREAMING_SNAKE_CASE_ )
# Let's go
_UpperCAmelCase = parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE_ , '''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main() | 32 |
from math import sqrt
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A__ ( SCREAMING_SNAKE_CASE_ : int = 1_00_01 ) -> int:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = 1
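# Count 2 and 3 first; beyond 3, only odd numbers can be prime, so step by 2.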
while count != nth and number < 3:
number += 1
if is_prime(SCREAMING_SNAKE_CASE_ ):
count += 1
while count != nth:
number += 2
if is_prime(SCREAMING_SNAKE_CASE_ ):
count += 1
return number
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 1 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class __UpperCamelCase ( unittest.TestCase , A__ ):
def UpperCamelCase( self ):
_UpperCAmelCase = load_tool('''text-classification''' )
self.tool.setup()
_UpperCAmelCase = load_tool('''text-classification''' , remote=_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.tool('''That\'s quite cool''' , ['''positive''', '''negative'''] )
self.assertEqual(_UpperCamelCase , '''positive''' )
def UpperCamelCase( self ):
_UpperCAmelCase = self.remote_tool('''That\'s quite cool''' , ['''positive''', '''negative'''] )
self.assertEqual(_UpperCamelCase , '''positive''' )
def UpperCamelCase( self ):
_UpperCAmelCase = self.tool(text='''That\'s quite cool''' , labels=['''positive''', '''negative'''] )
self.assertEqual(_UpperCamelCase , '''positive''' )
def UpperCamelCase( self ):
_UpperCAmelCase = self.remote_tool(text='''That\'s quite cool''' , labels=['''positive''', '''negative'''] )
self.assertEqual(_UpperCamelCase , '''positive''' ) | 32 |
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = F'''Input value of [number={number}] must be an integer'''
raise TypeError(SCREAMING_SNAKE_CASE_ )
if number < 0:
return False
_UpperCAmelCase = number * number
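# Compare the trailing digits of the number and its square one position at a time.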
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 1 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def A__ ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple ) -> int:
"""simple docstring"""
if gpta_config_file == "":
_UpperCAmelCase = GPTaConfig()
else:
_UpperCAmelCase = GPTaConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = GPTaModel(SCREAMING_SNAKE_CASE_ )
# Load weights from numpy
load_tf_weights_in_gpta(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save pytorch-model
_UpperCAmelCase = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
_UpperCAmelCase = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
UpperCAmelCase_ = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path) | 32 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class __UpperCamelCase ( A__ ):
__A : str = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
__A : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
__A : ClassVar[Features] = Features({} )
__A : str = "text"
@property
def UpperCamelCase( self ):
return {self.text_column: "text"} | 32 | 1 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=32 , _UpperCamelCase=5 , _UpperCamelCase=4 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
def UpperCamelCase( self ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase( self ):
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = NystromformerModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = NystromformerForMaskedLM(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = NystromformerForQuestionAnswering(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = NystromformerForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = NystromformerForTokenClassification(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = NystromformerForMultipleChoice(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
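# Expand each tensor along a new choice dimension so all num_choices candidates are scored in a single forward pass.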
_UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase( self ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( A__ , A__ , unittest.TestCase ):
__A : Tuple = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__A : Optional[Any] = (
{
"""feature-extraction""": NystromformerModel,
"""fill-mask""": NystromformerForMaskedLM,
"""question-answering""": NystromformerForQuestionAnswering,
"""text-classification""": NystromformerForSequenceClassification,
"""token-classification""": NystromformerForTokenClassification,
"""zero-shot""": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__A : Any = False
__A : Optional[Any] = False
def UpperCamelCase( self ):
_UpperCAmelCase = NystromformerModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 )
def UpperCamelCase( self ):
self.config_tester.run_common_tests()
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )
@slow
def UpperCamelCase( self ):
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = NystromformerModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' )
_UpperCAmelCase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
_UpperCAmelCase = model(_UpperCamelCase )[0]
_UpperCAmelCase = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , _UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = '''the [MASK] of Belgium is Brussels'''
_UpperCAmelCase = AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' )
_UpperCAmelCase = NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' )
_UpperCAmelCase = tokenizer(_UpperCamelCase , return_tensors='''pt''' )
with torch.no_grad():
_UpperCAmelCase = model(encoding.input_ids ).logits
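# position 2 holds the [MASK] token: [CLS], 'the', [MASK], 'of', ...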
_UpperCAmelCase = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(_UpperCamelCase ) , '''capital''' ) | 32 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"vocab_file": "spiece.model"}
UpperCAmelCase_ = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
UpperCAmelCase_ = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
UpperCAmelCase_ = "▁"
class __UpperCamelCase ( A__ ):
__A : Any = VOCAB_FILES_NAMES
__A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Tuple = ["""input_ids""", """attention_mask"""]
def __init__( self , _UpperCamelCase , _UpperCamelCase="</s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase=100 , _UpperCamelCase=None , _UpperCamelCase = None , _UpperCamelCase=True , **_UpperCamelCase , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_UpperCAmelCase = [f'''<extra_id_{i}>''' for i in range(_UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_UpperCAmelCase = len(set(filter(lambda _UpperCamelCase : bool('''extra_id''' in str(_UpperCamelCase ) ) , _UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
if legacy:
logger.warning_once(
f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
_UpperCAmelCase = legacy
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , extra_ids=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=_UpperCamelCase , **_UpperCamelCase , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = extra_ids
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCamelCase )
@staticmethod
def UpperCamelCase( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
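# backwards-compatibility shim (see the TODO above): keep the deprecated per-checkpoint
# max length of 512 for known checkpoints, but warn that it will change in Transformers v5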
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_UpperCAmelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _UpperCamelCase , )
return max_model_length
@property
def UpperCamelCase( self ):
return self.sp_model.get_piece_size() + self._extra_ids
def UpperCamelCase( self ):
_UpperCAmelCase = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(_UpperCamelCase )) + [1]
return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
def UpperCamelCase( self ):
return list(
set(filter(lambda _UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , _UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) )
def UpperCamelCase( self ):
return [self._convert_token_to_id(_UpperCamelCase ) for token in self.get_sentinel_tokens()]
def UpperCamelCase( self , _UpperCamelCase ):
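# do not add eos again if the user has already added it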
if len(_UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
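# T5 does not use token type ids, so the mask is all zeros over both sequences (eos included)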
_UpperCAmelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase )
if token_ids_a is None:
return token_ids_a
else:
_UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase )
return token_ids_a + token_ids_a
def __getstate__( self ):
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
return state
def __setstate__( self , _UpperCamelCase ):
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
_UpperCAmelCase = SPIECE_UNDERLINE + text.replace(_UpperCamelCase , ''' ''' )
return super().tokenize(_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ):
if not self.legacy:
_UpperCAmelCase = text.startswith(_UpperCamelCase )
if is_first:
_UpperCAmelCase = text[1:]
_UpperCAmelCase = self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(_UpperCamelCase ):
_UpperCAmelCase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def UpperCamelCase( self , _UpperCamelCase ):
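# sentinel tokens live at the end of the vocabulary: <extra_id_0> maps to vocab_size - 1,
# <extra_id_1> to vocab_size - 2, and so on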
if token.startswith('''<extra_id_''' ):
_UpperCAmelCase = re.match(R'''<extra_id_(\d+)>''' , _UpperCamelCase )
_UpperCAmelCase = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase ):
if index < self.sp_model.get_piece_size():
_UpperCAmelCase = self.sp_model.IdToPiece(_UpperCamelCase )
else:
_UpperCAmelCase = f'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = []
_UpperCAmelCase = ''''''
_UpperCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCamelCase ) + token
_UpperCAmelCase = True
_UpperCAmelCase = []
else:
current_sub_tokens.append(_UpperCamelCase )
_UpperCAmelCase = False
out_string += self.sp_model.decode(_UpperCamelCase )
return out_string.strip()
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
if not os.path.isdir(_UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase = os.path.join(
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , '''wb''' ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,) | 32 | 1 |
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = "T5Config"
class __UpperCamelCase ( A__ ):
__A : str = """mt5"""
__A : Optional[Any] = MTaConfig
class __UpperCamelCase ( A__ ):
__A : Tuple = """mt5"""
__A : List[str] = MTaConfig
class __UpperCamelCase ( A__ ):
__A : Optional[Any] = """mt5"""
__A : int = MTaConfig | 32 |
from __future__ import annotations
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
_UpperCAmelCase = str(SCREAMING_SNAKE_CASE_ )
return len(SCREAMING_SNAKE_CASE_ ) == 9 and set(SCREAMING_SNAKE_CASE_ ) == set('''123456789''' )
def A__ ( ) -> int | None:
"""simple docstring"""
for base_num in range(99_99 , 49_99 , -1 ):
_UpperCAmelCase = 10_00_02 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
for base_num in range(3_33 , 99 , -1 ):
_UpperCAmelCase = 1_00_20_03 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
return None
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 1 |
def A__ ( SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : list[int] ) -> tuple[float, float]:
"""simple docstring"""
if not len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ) == 3:
raise ValueError('''Please enter a valid equation.''' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('''Both a & b of two equations can\'t be zero.''' )
# Extract the coefficients
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = equationa
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = equationa
# Calculate the determinants of the matrices
_UpperCAmelCase = aa * ba - aa * ba
_UpperCAmelCase = ca * ba - ca * ba
_UpperCAmelCase = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('''Infinite solutions. (Consistent system)''' )
else:
raise ValueError('''No solution. (Inconsistent system)''' )
else:
if determinant_x == determinant_y == 0:
# Trivial solution: both numerator determinants vanish while the main determinant
# is non-zero, so x = y = 0 (the system is consistent)
return (0.0, 0.0)
else:
_UpperCAmelCase = determinant_x / determinant
_UpperCAmelCase = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y) | 32 |
import numpy as np
def A__ ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : float ) -> np.ndarray:
"""simple docstring"""
return np.where(vector > 0 , SCREAMING_SNAKE_CASE_ , (alpha * (np.exp(SCREAMING_SNAKE_CASE_ ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def A__ ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : str=None ) -> dict:
"""simple docstring"""
if attention_mask is None:
_UpperCAmelCase = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __UpperCamelCase :
__A : Optional[int] = OPTConfig
__A : Tuple = {}
__A : List[Any] = """gelu"""
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=99 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=4 , _UpperCamelCase=4 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=20 , _UpperCamelCase=2 , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=16 , _UpperCamelCase=16 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = embed_dim
_UpperCAmelCase = word_embed_proj_dim
_UpperCAmelCase = False
def UpperCamelCase( self ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=_UpperCamelCase , **self.config_updates , )
_UpperCAmelCase = prepare_opt_inputs_dict(_UpperCamelCase , _UpperCamelCase )
return config, inputs_dict
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = TFOPTModel(config=_UpperCamelCase )
_UpperCAmelCase = inputs_dict['''input_ids''']
_UpperCAmelCase = input_ids[:1, :]
_UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :]
_UpperCAmelCase = 1
# first forward pass
_UpperCAmelCase = model(_UpperCamelCase , attention_mask=_UpperCamelCase , use_cache=_UpperCamelCase )
_UpperCAmelCase , _UpperCAmelCase = outputs.to_tuple()
# create hypothetical next tokens and extend to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention_mask
_UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCAmelCase = model(_UpperCamelCase , attention_mask=_UpperCamelCase )[0]
_UpperCAmelCase = model(_UpperCamelCase , attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
_UpperCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_UpperCamelCase , _UpperCamelCase , rtol=1e-3 )
@require_tf
class __UpperCamelCase ( A__ , A__ , unittest.TestCase ):
__A : Union[str, Any] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
__A : int = (TFOPTForCausalLM,) if is_tf_available() else ()
__A : Any = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
__A : Dict = False
__A : Union[str, Any] = False
__A : Any = False
__A : Union[str, Any] = 10
def UpperCamelCase( self ):
_UpperCAmelCase = TFOPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase )
def UpperCamelCase( self ):
self.config_tester.run_common_tests()
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(_UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , '''weight''' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(_UpperCamelCase , '''weight''' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
_UpperCAmelCase = model_class(config=_UpperCamelCase )
_UpperCAmelCase = _get_word_embedding_weight(_UpperCamelCase , model.get_input_embeddings() )
_UpperCAmelCase = _get_word_embedding_weight(_UpperCamelCase , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(_UpperCamelCase )
_UpperCAmelCase = _get_word_embedding_weight(_UpperCamelCase , model.get_input_embeddings() )
_UpperCAmelCase = _get_word_embedding_weight(_UpperCamelCase , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
_UpperCAmelCase = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , _UpperCamelCase )
# check that weights remain the same after resizing
_UpperCAmelCase = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_UpperCAmelCase = False
self.assertTrue(_UpperCamelCase )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , _UpperCamelCase )
_UpperCAmelCase = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_UpperCAmelCase = False
self.assertTrue(_UpperCamelCase )
def A__ ( SCREAMING_SNAKE_CASE_ : List[Any] ) -> tf.Tensor:
"""simple docstring"""
return tf.constant(SCREAMING_SNAKE_CASE_ , dtype=tf.intaa )
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
__A : List[Any] = 99
def UpperCamelCase( self ):
_UpperCAmelCase = tf.ones((4, 1) , dtype=tf.intaa ) * 2
_UpperCAmelCase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
_UpperCAmelCase = input_ids.shape[0]
_UpperCAmelCase = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
_UpperCAmelCase = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
_UpperCAmelCase = tf.not_equal(_UpperCamelCase , model.config.pad_token_id )
with tf.GradientTape():
_UpperCAmelCase = model(input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase ).last_hidden_state
_UpperCAmelCase = (1, 11, 512)
self.assertEqual(output.shape , _UpperCamelCase )
_UpperCAmelCase = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , _UpperCamelCase , atol=4e-3 ) )
_UpperCAmelCase = tf.function(_UpperCamelCase , jit_compile=_UpperCamelCase )
_UpperCAmelCase = xla_generate(_UpperCamelCase , _UpperCamelCase )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , _UpperCamelCase , atol=4e-2 ) )
@require_tf
@slow
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
super().setUp()
_UpperCAmelCase = '''facebook/opt-350m'''
def UpperCamelCase( self ):
_UpperCAmelCase = TFOPTForCausalLM.from_pretrained(self.path_model )
_UpperCAmelCase = GPTaTokenizer.from_pretrained(self.path_model )
_UpperCAmelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
_UpperCAmelCase = tokenizer(_UpperCamelCase , return_tensors='''tf''' , padding=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
_UpperCAmelCase = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
_UpperCAmelCase = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
] )
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-4 ) )
_UpperCAmelCase = tf.function(_UpperCamelCase , jit_compile=_UpperCamelCase )
_UpperCAmelCase = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-4 ) )
@require_tf
@slow
class __UpperCamelCase ( unittest.TestCase ):
@property
def UpperCamelCase( self ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def UpperCamelCase( self ):
_UpperCAmelCase = '''facebook/opt-125m'''
_UpperCAmelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
_UpperCAmelCase = []
_UpperCAmelCase = GPTaTokenizer.from_pretrained(_UpperCamelCase )
_UpperCAmelCase = TFOPTForCausalLM.from_pretrained(_UpperCamelCase )
for prompt in self.prompts:
_UpperCAmelCase = tokenizer(_UpperCamelCase , return_tensors='''tf''' ).input_ids
_UpperCAmelCase = model.generate(_UpperCamelCase , max_length=10 )
_UpperCAmelCase = tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase )
predicted_outputs += generated_string
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = '''facebook/opt-350m'''
_UpperCAmelCase = GPTaTokenizer.from_pretrained(_UpperCamelCase )
_UpperCAmelCase = TFOPTForCausalLM.from_pretrained(_UpperCamelCase )
_UpperCAmelCase = '''left'''
# use different length sentences to test batching
_UpperCAmelCase = [
'''Hello, my dog is a little''',
'''Today, I''',
]
_UpperCAmelCase = tokenizer(_UpperCamelCase , return_tensors='''tf''' , padding=_UpperCamelCase )
_UpperCAmelCase = inputs['''input_ids''']
_UpperCAmelCase = model.generate(input_ids=_UpperCamelCase , attention_mask=inputs['''attention_mask'''] )
_UpperCAmelCase = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
_UpperCAmelCase = model.generate(input_ids=_UpperCamelCase )
_UpperCAmelCase = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] , tf.intaa ) )
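# the second sentence received num_paddings pad tokens in the batch; shrink max_length
# by the same amount so the standalone generation aligns with the batched one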
_UpperCAmelCase = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
_UpperCAmelCase = model.generate(input_ids=_UpperCamelCase , max_length=model.config.max_length - num_paddings )
_UpperCAmelCase = tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase )
_UpperCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_UpperCamelCase )
_UpperCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=_UpperCamelCase )
_UpperCAmelCase = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertListEqual(_UpperCamelCase , [non_padded_sentence, padded_sentence] )
def UpperCamelCase( self ):
_UpperCAmelCase = '''facebook/opt-350m'''
_UpperCAmelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
_UpperCAmelCase = []
_UpperCAmelCase = GPTaTokenizer.from_pretrained(_UpperCamelCase )
_UpperCAmelCase = TFOPTForCausalLM.from_pretrained(_UpperCamelCase )
for prompt in self.prompts:
_UpperCAmelCase = tokenizer(_UpperCamelCase , return_tensors='''tf''' ).input_ids
_UpperCAmelCase = model.generate(_UpperCamelCase , max_length=10 )
_UpperCAmelCase = tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase )
predicted_outputs += generated_string
self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) | 32 |
# fmt: off
UpperCAmelCase_ = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
UpperCAmelCase_ = {value: key for key, value in MORSE_CODE_DICT.items()}
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> str:
"""simple docstring"""
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> str:
"""simple docstring"""
return "".join(REVERSE_DICT[char] for char in message.split() )
def A__ ( ) -> None:
"""simple docstring"""
_UpperCAmelCase = '''Morse code here!'''
print(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = encrypt(SCREAMING_SNAKE_CASE_ )
print(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = decrypt(SCREAMING_SNAKE_CASE_ )
print(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main() | 32 | 1 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] ) -> None:
"""simple docstring"""
_UpperCAmelCase = MobileBertConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
print(F'''Building PyTorch model from configuration: {config}''' )
_UpperCAmelCase = MobileBertForPreTraining(SCREAMING_SNAKE_CASE_ )
# Load weights from tf checkpoint
_UpperCAmelCase = load_tf_weights_in_mobilebert(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save the PyTorch model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path) | 32 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( A__ , unittest.TestCase ):
__A : Any = DanceDiffusionPipeline
__A : Any = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__A : Tuple = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
__A : Tuple = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__A : List[str] = False
__A : str = False
def UpperCamelCase( self ):
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_UpperCamelCase , use_timestep_embedding=_UpperCamelCase , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
_UpperCAmelCase = IPNDMScheduler()
_UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=0 ):
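# torch.Generator is not supported on mps, so fall back to seeding the global RNG there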
if str(_UpperCamelCase ).startswith('''mps''' ):
_UpperCAmelCase = torch.manual_seed(_UpperCamelCase )
else:
_UpperCAmelCase = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
_UpperCAmelCase = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 4,
}
return inputs
def UpperCamelCase( self ):
_UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = DanceDiffusionPipeline(**_UpperCamelCase )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = self.get_dummy_inputs(_UpperCamelCase )
_UpperCAmelCase = pipe(**_UpperCamelCase )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_UpperCAmelCase = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCamelCase( self ):
return super().test_save_load_local()
@skip_mps
def UpperCamelCase( self ):
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def UpperCamelCase( self ):
return super().test_save_load_optional_components()
@skip_mps
def UpperCamelCase( self ):
return super().test_attention_slicing_forward_pass()
def UpperCamelCase( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase( self ):
_UpperCAmelCase = torch_device
_UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCAmelCase = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase( self ):
_UpperCAmelCase = torch_device
_UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCAmelCase = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 | 32 | 1 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __UpperCamelCase ( A__ ):
def __init__( self , _UpperCamelCase , _UpperCamelCase=768 ):
super().__init__(_UpperCamelCase )
_UpperCAmelCase = proj_size
_UpperCAmelCase = CLIPVisionModel(_UpperCamelCase )
_UpperCAmelCase = PaintByExampleMapper(_UpperCamelCase )
_UpperCAmelCase = nn.LayerNorm(config.hidden_size )
_UpperCAmelCase = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
_UpperCAmelCase = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=False ):
_UpperCAmelCase = self.model(pixel_values=_UpperCamelCase )
_UpperCAmelCase = clip_output.pooler_output
_UpperCAmelCase = self.mapper(latent_states[:, None] )
_UpperCAmelCase = self.final_layer_norm(_UpperCamelCase )
_UpperCAmelCase = self.proj_out(_UpperCamelCase )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class __UpperCamelCase ( nn.Module ):
def __init__( self , _UpperCamelCase ):
super().__init__()
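# (assumption) roughly one transformer block per five CLIP encoder layers;
# a 24-layer CLIP ViT gives (24 + 1) // 5 = 5 mapper blocks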
_UpperCAmelCase = (config.num_hidden_layers + 1) // 5
_UpperCAmelCase = config.hidden_size
_UpperCAmelCase = 1
_UpperCAmelCase = nn.ModuleList(
[
BasicTransformerBlock(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , activation_fn='''gelu''' , attention_bias=_UpperCamelCase )
for _ in range(_UpperCamelCase )
] )
def UpperCamelCase( self , _UpperCamelCase ):
for block in self.blocks:
_UpperCAmelCase = block(_UpperCamelCase )
return hidden_states | 32 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Image Classification mapping
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModel)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Optional[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_FOR_MASKED_LM_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Any = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Optional[int] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : str = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
) | 32 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __UpperCamelCase ( A__ , A__ , unittest.TestCase ):
__A : Union[str, Any] = StableDiffusionPanoramaPipeline
__A : Tuple = TEXT_TO_IMAGE_PARAMS
__A : str = TEXT_TO_IMAGE_BATCH_PARAMS
__A : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
__A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase( self ):
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
_UpperCAmelCase = DDIMScheduler()
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_UpperCAmelCase = CLIPTextModel(_UpperCamelCase )
_UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=0 ):
_UpperCAmelCase = torch.manual_seed(_UpperCamelCase )
_UpperCAmelCase = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
# Setting height and width to None to prevent OOMs on CPU.
'''height''': None,
'''width''': None,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase( self ):
_UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionPanoramaPipeline(**_UpperCamelCase )
_UpperCAmelCase = sd_pipe.to(_UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = self.get_dummy_inputs(_UpperCamelCase )
_UpperCAmelCase = sd_pipe(**_UpperCamelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase( self ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase( self ):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 )
def UpperCamelCase( self ):
_UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionPanoramaPipeline(**_UpperCamelCase )
_UpperCAmelCase = sd_pipe.to(_UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = self.get_dummy_inputs(_UpperCamelCase )
_UpperCAmelCase = '''french fries'''
_UpperCAmelCase = sd_pipe(**_UpperCamelCase , negative_prompt=_UpperCamelCase )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase( self ):
_UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionPanoramaPipeline(**_UpperCamelCase )
_UpperCAmelCase = sd_pipe.to(_UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = self.get_dummy_inputs(_UpperCamelCase )
_UpperCAmelCase = sd_pipe(**_UpperCamelCase , view_batch_size=2 )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase( self ):
_UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
_UpperCAmelCase = StableDiffusionPanoramaPipeline(**_UpperCamelCase )
_UpperCAmelCase = sd_pipe.to(_UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = self.get_dummy_inputs(_UpperCamelCase )
_UpperCAmelCase = sd_pipe(**_UpperCamelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase( self ):
_UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = PNDMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=_UpperCamelCase )
_UpperCAmelCase = StableDiffusionPanoramaPipeline(**_UpperCamelCase )
_UpperCAmelCase = sd_pipe.to(_UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = self.get_dummy_inputs(_UpperCamelCase )
_UpperCAmelCase = sd_pipe(**_UpperCamelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase( self , _UpperCamelCase=0 ):
_UpperCAmelCase = torch.manual_seed(_UpperCamelCase )
_UpperCAmelCase = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase( self ):
_UpperCAmelCase = '''stabilityai/stable-diffusion-2-base'''
_UpperCAmelCase = DDIMScheduler.from_pretrained(_UpperCamelCase , subfolder='''scheduler''' )
_UpperCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCamelCase , scheduler=_UpperCamelCase , safety_checker=_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
pipe.enable_attention_slicing()
_UpperCAmelCase = self.get_inputs()
_UpperCAmelCase = pipe(**_UpperCamelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_UpperCAmelCase = np.array(
[
0.36968392,
0.27025372,
0.32446766,
0.28379387,
0.36363274,
0.30733347,
0.27100027,
0.27054125,
0.25536096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def UpperCamelCase( self ):
_UpperCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-base''' , safety_checker=_UpperCamelCase )
_UpperCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
pipe.enable_attention_slicing()
_UpperCAmelCase = self.get_inputs()
_UpperCAmelCase = pipe(**_UpperCamelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_UpperCAmelCase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCamelCase( self ):
_UpperCAmelCase = 0
def callback_fn(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> None:
_UpperCAmelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_UpperCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_UpperCAmelCase = latents[0, -3:, -3:, -1]
_UpperCAmelCase = np.array(
[
0.18681869,
0.33907816,
0.5361276,
0.14432865,
-0.02856611,
-0.73941123,
0.23397987,
0.47322682,
-0.37823164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
_UpperCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_UpperCAmelCase = latents[0, -3:, -3:, -1]
_UpperCAmelCase = np.array(
[
0.18539645,
0.33987248,
0.5378559,
0.14437142,
-0.02455261,
-0.7338317,
0.23990755,
0.47356272,
-0.3786505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
_UpperCAmelCase = False
_UpperCAmelCase = '''stabilityai/stable-diffusion-2-base'''
_UpperCAmelCase = DDIMScheduler.from_pretrained(_UpperCamelCase , subfolder='''scheduler''' )
_UpperCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCamelCase , scheduler=_UpperCamelCase , safety_checker=_UpperCamelCase )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
pipe.enable_attention_slicing()
_UpperCAmelCase = self.get_inputs()
pipe(**_UpperCamelCase , callback=_UpperCamelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCamelCase( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase = '''stabilityai/stable-diffusion-2-base'''
_UpperCAmelCase = DDIMScheduler.from_pretrained(_UpperCamelCase , subfolder='''scheduler''' )
_UpperCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCamelCase , scheduler=_UpperCamelCase , safety_checker=_UpperCamelCase )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase = self.get_inputs()
_UpperCAmelCase = pipe(**_UpperCamelCase )
_UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
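# Illustrative stand-alone usage of the pipeline exercised above; a sketch,
# not part of the test suite. The checkpoint matches the slow tests, while the
# fp16/CUDA choices are assumptions for a typical GPU run.
import torch
from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline

model_ckpt = "stabilityai/stable-diffusion-2-base"
scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(
    model_ckpt, scheduler=scheduler, torch_dtype=torch.float16
).to("cuda")
panorama = pipe("a photo of the dolomites").images[0]  # 512x2048 image by default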
import baseaa
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> bytes:
"""simple docstring"""
return baseaa.baaencode(string.encode('''utf-8''' ) )
def A__ ( SCREAMING_SNAKE_CASE_ : bytes ) -> str:
"""simple docstring"""
return baseaa.baadecode(SCREAMING_SNAKE_CASE_ ).decode('''utf-8''' )
if __name__ == "__main__":
UpperCAmelCase_ = "Hello World!"
UpperCAmelCase_ = baseaa_encode(test)
print(encoded)
UpperCAmelCase_ = baseaa_decode(encoded)
print(decoded)
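# Readable reconstruction of the round trip above, as a hedged sketch: the
# renamer flattened digits to "a" (hence "baseaa.baaencode"), so the exact
# stdlib helper (b16/b32/b64/b85 in the base64 module) is ambiguous; b64 is
# assumed here.
import base64

def base64_encode(string: str) -> bytes:
    """Encode a UTF-8 string to Base64 bytes."""
    return base64.b64encode(string.encode("utf-8"))

def base64_decode(encoded: bytes) -> str:
    """Decode Base64 bytes back to a UTF-8 string."""
    return base64.b64decode(encoded).decode("utf-8")

assert base64_decode(base64_encode("Hello World!")) == "Hello World!"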
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
UpperCAmelCase_ = threading.Lock()
UpperCAmelCase_ = None
UpperCAmelCase_ = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
UpperCAmelCase_ = logging.WARNING
UpperCAmelCase_ = True
def A__ ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = os.getenv('''TRANSFORMERS_VERBOSITY''' , None ) # default None when the env var is unset
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '''
F'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
return _default_log_level
def A__ ( ) -> str:
"""simple docstring"""
return __name__.split('''.''' )[0]
def A__ ( ) -> logging.Logger:
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def A__ ( ) -> None:
"""simple docstring"""
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_UpperCAmelCase = logging.StreamHandler() # Set sys.stderr as stream.
_UpperCAmelCase = sys.stderr.flush
# Apply our default configuration to the library root logger.
_UpperCAmelCase = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
_UpperCAmelCase = False
def A__ ( ) -> None:
"""simple docstring"""
global _default_handler
with _lock:
if not _default_handler:
return
_UpperCAmelCase = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
_UpperCAmelCase = None
def A__ ( ) -> List[Any]:
"""simple docstring"""
return log_levels
def A__ ( SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> logging.Logger:
"""simple docstring"""
if name is None:
_UpperCAmelCase = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(SCREAMING_SNAKE_CASE_ )
def A__ ( ) -> int:
"""simple docstring"""
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> None:
"""simple docstring"""
_configure_library_root_logger()
_get_library_root_logger().setLevel(SCREAMING_SNAKE_CASE_ )
def A__ ( ) -> Optional[Any]:
"""simple docstring"""
return set_verbosity(INFO ) # set_verbosity_info in the upstream source
def A__ ( ) -> int:
"""simple docstring"""
return set_verbosity(WARNING ) # set_verbosity_warning
def A__ ( ) -> List[str]:
"""simple docstring"""
return set_verbosity(DEBUG ) # set_verbosity_debug
def A__ ( ) -> Optional[Any]:
"""simple docstring"""
return set_verbosity(ERROR ) # set_verbosity_error
def A__ ( ) -> None:
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def A__ ( ) -> None:
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def A__ ( SCREAMING_SNAKE_CASE_ : logging.Handler ) -> None:
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(SCREAMING_SNAKE_CASE_ )
def A__ ( SCREAMING_SNAKE_CASE_ : logging.Handler ) -> None:
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(SCREAMING_SNAKE_CASE_ )
def A__ ( ) -> None:
"""simple docstring"""
_configure_library_root_logger()
_UpperCAmelCase = False
def A__ ( ) -> None:
"""simple docstring"""
_configure_library_root_logger()
_UpperCAmelCase = True
def A__ ( ) -> None:
"""simple docstring"""
_UpperCAmelCase = _get_library_root_logger().handlers
for handler in handlers:
_UpperCAmelCase = logging.Formatter('''[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s''' )
handler.setFormatter(SCREAMING_SNAKE_CASE_ )
def A__ ( ) -> None:
"""simple docstring"""
_UpperCAmelCase = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(SCREAMING_SNAKE_CASE_ )
def A__ ( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> str:
"""simple docstring"""
_UpperCAmelCase = os.getenv('''TRANSFORMERS_NO_ADVISORY_WARNINGS''' , False ) # defaults to False as upstream
if no_advisory_warnings:
return
self.warning(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ = warning_advice
@functools.lru_cache(None ) # unbounded cache so each distinct message warns once
def A__ ( self : Dict , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[int]:
"""simple docstring"""
self.warning(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ = warning_once
class __UpperCamelCase :
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ): # pylint: disable=unused-argument
_UpperCAmelCase = args[0] if args else None
def __iter__( self ):
return iter(self._iterator )
def __getattr__( self , _UpperCamelCase ):
def empty_fn(*_UpperCamelCase , **_UpperCamelCase ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ):
return self
def __exit__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return
class __UpperCamelCase :
def __call__( self , *_UpperCamelCase , **_UpperCamelCase ):
if _tqdm_active:
return tqdm_lib.tqdm(*_UpperCamelCase , **_UpperCamelCase )
else:
return EmptyTqdm(*_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ):
_UpperCAmelCase = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self ):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
UpperCAmelCase_ = _tqdm_cls()
def A__ ( ) -> bool:
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def A__ ( ) -> str:
"""simple docstring"""
global _tqdm_active
_UpperCAmelCase = True
hf_hub_utils.enable_progress_bars()
def A__ ( ) -> List[Any]:
"""simple docstring"""
global _tqdm_active
_UpperCAmelCase = False
hf_hub_utils.disable_progress_bars()
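# Usage sketch for the verbosity facade above, assuming it is exposed the way
# the upstream library exposes it (as transformers.utils.logging); every call
# below exists under that module.
from transformers.utils import logging

logging.set_verbosity_info()            # library-wide level -> INFO
logger = logging.get_logger(__name__)
logger.info("now visible")
logger.warning_advice("suppressed when TRANSFORMERS_NO_ADVISORY_WARNINGS is set")
logging.enable_explicit_format()        # [LEVEL|file:line] timestamp >> message
logging.disable_progress_bar()          # turn tqdm bars off globally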
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase_ = logging.get_logger(__name__)
class __UpperCamelCase ( A__ ):
__A : int = ["""pixel_values"""]
def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1 / 255 , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ):
super().__init__(**_UpperCamelCase )
_UpperCAmelCase = size if size is not None else {'''height''': 224, '''width''': 224}
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_UpperCAmelCase = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase , param_name='''crop_size''' )
_UpperCAmelCase = do_resize
_UpperCAmelCase = do_rescale
_UpperCAmelCase = do_normalize
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BILINEAR , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if "shortest_edge" in size:
_UpperCAmelCase = get_resize_output_image_size(_UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=_UpperCamelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_UpperCAmelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ):
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ):
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(_UpperCamelCase , param_name='''crop_size''' , default_to_square=_UpperCamelCase )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if not is_batched(_UpperCamelCase ):
_UpperCAmelCase = [images]
if not valid_images(_UpperCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(_UpperCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
_UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
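# Numeric sketch of the transform chain this processor applies with its
# defaults (resize, center crop, rescale, normalize, channels-first). PIL and
# numpy stand in for the library's transform helpers; at 224x224 the center
# crop becomes a no-op after the resize.
import numpy as np
from PIL import Image

_MEAN = np.array([0.485, 0.456, 0.406])  # IMAGENET_DEFAULT_MEAN
_STD = np.array([0.229, 0.224, 0.225])   # IMAGENET_DEFAULT_STD

def preprocess_sketch(image: Image.Image) -> np.ndarray:
    image = image.resize((224, 224), Image.BICUBIC)        # resize (crop then no-op)
    pixels = np.asarray(image, dtype=np.float32) / 255.0   # rescale to [0, 1]
    pixels = (pixels - _MEAN) / _STD                       # per-channel normalize
    return pixels.transpose(2, 0, 1)                       # HWC -> CHW (ChannelDimension.FIRST)

assert preprocess_sketch(Image.new("RGB", (256, 256))).shape == (3, 224, 224)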
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(A__ )
class __UpperCamelCase ( A__ ):
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
self.check_model_type(_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ):
_UpperCAmelCase , _UpperCAmelCase = {}, {}
if padding is not None:
_UpperCAmelCase = padding
if truncation is not None:
_UpperCAmelCase = truncation
if top_k is not None:
_UpperCAmelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ):
if isinstance(_UpperCamelCase , (Image.Image, str) ) and isinstance(_UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = {'''image''': image, '''question''': question}
else:
_UpperCAmelCase = image
_UpperCAmelCase = super().__call__(_UpperCamelCase , **_UpperCamelCase )
return results
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False ):
_UpperCAmelCase = load_image(inputs['''image'''] )
_UpperCAmelCase = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=_UpperCamelCase , truncation=_UpperCamelCase )
_UpperCAmelCase = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework )
model_inputs.update(_UpperCamelCase )
return model_inputs
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = self.model(**_UpperCamelCase )
return model_outputs
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=5 ):
if top_k > self.model.config.num_labels:
_UpperCAmelCase = self.model.config.num_labels
if self.framework == "pt":
_UpperCAmelCase = model_outputs.logits.sigmoid()[0]
_UpperCAmelCase , _UpperCAmelCase = probs.topk(_UpperCamelCase )
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
_UpperCAmelCase = scores.tolist()
_UpperCAmelCase = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase )] | 32 |
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=A__ ):
__A : str = ["""torch""", """scipy"""]
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''torch''', '''scipy'''] )
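# Minimal sketch of what requires_backends does when a backend is missing:
# fail at use time, not import time, naming the absent packages. The function
# name is prefixed to mark it as a reconstruction, not the library's own.
def _requires_backends_sketch(obj, backends):
    name = getattr(obj, "__name__", obj.__class__.__name__)
    raise ImportError(
        f"{name} requires the following backends, which were not found: "
        + ", ".join(backends)
    )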
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
def A__ ( SCREAMING_SNAKE_CASE_ : nn.ModuleList , SCREAMING_SNAKE_CASE_ : nn.ModuleList , SCREAMING_SNAKE_CASE_ : List[int] ) -> None:
"""simple docstring"""
_UpperCAmelCase = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ), F'''{len(SCREAMING_SNAKE_CASE_ )} != {len(SCREAMING_SNAKE_CASE_ )}'''
dest_layers.load_state_dict(layers_to_copy.state_dict() )
UpperCAmelCase_ = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
UpperCAmelCase_ = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def A__ ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> str:
"""simple docstring"""
try:
_UpperCAmelCase = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'''
F''' {n_student}''' )
return list(range(SCREAMING_SNAKE_CASE_ ) )
def A__ ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple ) -> List[int]:
"""simple docstring"""
if n_student > n_teacher:
raise ValueError(F'''Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}''' )
elif n_teacher == n_student:
return list(range(SCREAMING_SNAKE_CASE_ ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def A__ ( SCREAMING_SNAKE_CASE_ : Union[str, PreTrainedModel] , SCREAMING_SNAKE_CASE_ : Union[str, Path] = "student" , SCREAMING_SNAKE_CASE_ : Union[int, None] = None , SCREAMING_SNAKE_CASE_ : Union[int, None] = None , SCREAMING_SNAKE_CASE_ : List[Any]=False , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , **SCREAMING_SNAKE_CASE_ : int , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
"""simple docstring"""
_UpperCAmelCase = '''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ).save_pretrained(SCREAMING_SNAKE_CASE_ ) # purely for convenience
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE_ ).eval()
else:
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), F'''teacher must be a model or string got type {type(SCREAMING_SNAKE_CASE_ )}'''
_UpperCAmelCase = teacher.config.to_diff_dict()
try:
_UpperCAmelCase , _UpperCAmelCase = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
_UpperCAmelCase = teacher_e
if d is None:
_UpperCAmelCase = teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config , '''num_encoder_layers''' ):
_UpperCAmelCase , _UpperCAmelCase = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
_UpperCAmelCase , _UpperCAmelCase = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
_UpperCAmelCase = teacher_e
if d is None:
_UpperCAmelCase = teacher_d
if hasattr(teacher.config , '''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(SCREAMING_SNAKE_CASE_ )
# Copy weights
_UpperCAmelCase = teacher.config_class(**SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_config(SCREAMING_SNAKE_CASE_ )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
_UpperCAmelCase = student.load_state_dict(teacher.state_dict() , strict=SCREAMING_SNAKE_CASE_ )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
_UpperCAmelCase , _UpperCAmelCase = list(range(SCREAMING_SNAKE_CASE_ ) ), list(range(SCREAMING_SNAKE_CASE_ ) )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
F''' {save_path}''' )
student.save_pretrained(SCREAMING_SNAKE_CASE_ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
_UpperCAmelCase = pick_layers_to_copy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if d_layers_to_copy is None:
_UpperCAmelCase = pick_layers_to_copy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
try:
if hasattr(
SCREAMING_SNAKE_CASE_ , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , SCREAMING_SNAKE_CASE_ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , SCREAMING_SNAKE_CASE_ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , SCREAMING_SNAKE_CASE_ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , SCREAMING_SNAKE_CASE_ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , SCREAMING_SNAKE_CASE_ )
copy_layers(teacher.decoder.block , student.decoder.block , SCREAMING_SNAKE_CASE_ )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' )
_UpperCAmelCase = {
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
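# Self-contained toy run of the copying step above: a 6-layer teacher feeds a
# 3-layer student through layers [0, 2, 5], the 6 -> 3 entry of the first map.
# The helper is re-stated with readable names since the original was renamed.
import torch
from torch import nn

def _copy_layers_sketch(src: nn.ModuleList, dest: nn.ModuleList, layers_to_copy) -> None:
    picked = nn.ModuleList([src[i] for i in layers_to_copy])
    assert len(dest) == len(picked), f"{len(dest)} != {len(picked)}"
    dest.load_state_dict(picked.state_dict())

teacher = nn.ModuleList(nn.Linear(8, 8) for _ in range(6))
student = nn.ModuleList(nn.Linear(8, 8) for _ in range(3))
_copy_layers_sketch(teacher, student, [0, 2, 5])
assert torch.equal(student[2].weight, teacher[5].weight)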
def A__ ( SCREAMING_SNAKE_CASE_ : int = 2_00_00_00 ) -> int:
"""simple docstring"""
_UpperCAmelCase = [0 for i in range(n + 1 )]
_UpperCAmelCase = 1
_UpperCAmelCase = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , i ): # step by the prime i, not by the sieve bound
_UpperCAmelCase = 1
_UpperCAmelCase = 0
for i in range(SCREAMING_SNAKE_CASE_ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f'''{solution() = }''')
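# Readable reconstruction of the sieve above (Project Euler 10). As printed,
# the renamer flattened the list writes (primality_list[j] = 1 and friends)
# into bare assignments, so the function cannot run; this restores the intent.
def sum_of_primes_below(n: int = 2_000_000) -> int:
    composite = [False] * (n + 1)
    composite[0] = composite[1] = True
    for i in range(2, int(n**0.5) + 1):
        if not composite[i]:
            for j in range(i * i, n + 1, i):  # strike out multiples of the prime i
                composite[j] = True
    return sum(i for i in range(n) if not composite[i])

assert sum_of_primes_below(10) == 17  # 2 + 3 + 5 + 7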
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def A__ ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int ) -> Any:
"""simple docstring"""
_UpperCAmelCase = k_size // 2
_UpperCAmelCase , _UpperCAmelCase = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
_UpperCAmelCase = 1 / (2 * pi * sigma) * exp(-(square(SCREAMING_SNAKE_CASE_ ) + square(SCREAMING_SNAKE_CASE_ )) / (2 * square(SCREAMING_SNAKE_CASE_ )) )
return g
def A__ ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> str:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = image.shape[0], image.shape[1]
# dst image height and width
_UpperCAmelCase = height - k_size + 1
_UpperCAmelCase = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
_UpperCAmelCase = zeros((dst_height * dst_width, k_size * k_size) )
_UpperCAmelCase = 0
for i, j in product(range(SCREAMING_SNAKE_CASE_ ) , range(SCREAMING_SNAKE_CASE_ ) ):
_UpperCAmelCase = ravel(image[i : i + k_size, j : j + k_size] )
_UpperCAmelCase = window
row += 1
# turn the kernel into shape(k*k, 1)
_UpperCAmelCase = gen_gaussian_kernel(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = ravel(SCREAMING_SNAKE_CASE_ )
# reshape and get the dst image
_UpperCAmelCase = dot(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).astype(SCREAMING_SNAKE_CASE_ )
return dst
if __name__ == "__main__":
# read original image
UpperCAmelCase_ = imread(r"../image_data/lena.jpg")
# turn image in gray scale value
UpperCAmelCase_ = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
UpperCAmelCase_ = gaussian_filter(gray, 3, sigma=1)
UpperCAmelCase_ = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("gaussian filter with 3x3 mask", gaussianaxa)
imshow("gaussian filter with 5x5 mask", gaussianaxa)
waitKey()
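# Stand-alone check of the kernel builder above, with readable names. Note the
# source's 1 / (2 * pi * sigma) prefactor (sigma, not sigma**2), so the kernel
# does not sum to 1 and the filter also rescales intensities slightly.
import numpy as np

def _gen_gaussian_kernel_sketch(k_size: int, sigma: float) -> np.ndarray:
    center = k_size // 2
    x, y = np.mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    return 1 / (2 * np.pi * sigma) * np.exp(-(np.square(x) + np.square(y)) / (2 * np.square(sigma)))

kernel = _gen_gaussian_kernel_sketch(3, sigma=1.0)
assert kernel.shape == (3, 3) and kernel[1, 1] == kernel.max()  # peak at the center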
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class __UpperCamelCase ( A__ ):
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class __UpperCamelCase ( A__ ):
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
warnings.warn(
'''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use LayoutLMv2ImageProcessor instead.''' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
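# The generic shape shared by the two deprecation shims above, reduced to a
# runnable sketch; both class names here are hypothetical stand-ins.
import warnings

class _NewProcessor:
    """Stand-in for the replacement class."""
    def __init__(self, *args, **kwargs):
        pass

class _OldFeatureExtractor(_NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "_OldFeatureExtractor is deprecated and will be removed;"
            " use _NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)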
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase ( A__ ):
__A : Dict = ["""image_processor""", """tokenizer"""]
__A : List[str] = """BridgeTowerImageProcessor"""
__A : str = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self , _UpperCamelCase , _UpperCamelCase ):
super().__init__(_UpperCamelCase , _UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = self.tokenizer(
text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
# add pixel_values + pixel_mask
_UpperCAmelCase = self.image_processor(
_UpperCamelCase , return_tensors=_UpperCamelCase , do_normalize=_UpperCamelCase , do_center_crop=_UpperCamelCase , **_UpperCamelCase )
encoding.update(_UpperCamelCase )
return encoding
def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ):
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ):
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase )
@property
def UpperCamelCase( self ):
_UpperCAmelCase = self.tokenizer.model_input_names
_UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
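# Hypothetical round trip through the combined processor; the checkpoint name
# is illustrative and the exact key set depends on the image processor.
from PIL import Image
from transformers import BridgeTowerProcessor

bt_processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
encoding = bt_processor(Image.new("RGB", (288, 288)), "a photo of a cat", return_tensors="pt")
print(sorted(encoding.keys()))  # input_ids, attention_mask, pixel_values, pixel_mask, ...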
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
UpperCAmelCase_ = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def A__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple=None ) -> Dict:
"""simple docstring"""
if rng is None:
_UpperCAmelCase = random.Random()
_UpperCAmelCase = 1
for dim in shape:
total_dims *= dim
_UpperCAmelCase = []
for _ in range(SCREAMING_SNAKE_CASE_ ):
values.append(rng.randint(0 , vocab_size - 1 ) )
_UpperCAmelCase = np.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa ).reshape(SCREAMING_SNAKE_CASE_ )
return output
def A__ ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int]=None ) -> int:
"""simple docstring"""
_UpperCAmelCase = ids_tensor(SCREAMING_SNAKE_CASE_ , vocab_size=2 , rng=SCREAMING_SNAKE_CASE_ )
# make sure that at least one token is attended to for each batch
_UpperCAmelCase = 1
return attn_mask
@require_flax
class __UpperCamelCase :
__A : Optional[Any] = None
__A : Tuple = ()
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
_UpperCAmelCase = 2
_UpperCAmelCase = inputs['''input_ids'''].shape[-1] // 2
_UpperCAmelCase = inputs['''input_ids'''][:max_batch_size, :sequence_length]
_UpperCAmelCase = jnp.ones_like(_UpperCamelCase )
_UpperCAmelCase = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
_UpperCAmelCase = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
_UpperCAmelCase = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = False
_UpperCAmelCase = max_length
_UpperCAmelCase = 0
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase )
_UpperCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
_UpperCAmelCase = getattr(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = pt_model_class(_UpperCamelCase ).eval()
_UpperCAmelCase = load_flax_weights_in_pytorch_model(_UpperCamelCase , flax_model.params )
_UpperCAmelCase = flax_model.generate(_UpperCamelCase ).sequences
_UpperCAmelCase = pt_model.generate(torch.tensor(_UpperCamelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
_UpperCAmelCase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = False
_UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase )
_UpperCAmelCase = model.generate(_UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(_UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = True
_UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase )
_UpperCAmelCase = model.generate(_UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(_UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = False
_UpperCAmelCase = max_length
_UpperCAmelCase = 2
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase )
_UpperCAmelCase = model.generate(_UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(_UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = False
_UpperCAmelCase = max_length
_UpperCAmelCase = 2
_UpperCAmelCase = 2
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase )
_UpperCAmelCase = model.generate(_UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = True
_UpperCAmelCase = max_length
_UpperCAmelCase = 0.8
_UpperCAmelCase = 10
_UpperCAmelCase = 0.3
_UpperCAmelCase = 1
_UpperCAmelCase = 8
_UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase )
_UpperCAmelCase = model.generate(_UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(_UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = max_length
_UpperCAmelCase = 1
_UpperCAmelCase = 8
_UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase )
_UpperCAmelCase = model.generate(_UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(_UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = max_length
_UpperCAmelCase = 2
_UpperCAmelCase = 1
_UpperCAmelCase = 8
_UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase )
_UpperCAmelCase = model.generate(_UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(_UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
_UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
_UpperCAmelCase = False
_UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase )
_UpperCAmelCase = model.generate(_UpperCamelCase , attention_mask=_UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(_UpperCamelCase , attention_mask=_UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
_UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
_UpperCAmelCase = True
_UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase )
_UpperCAmelCase = model.generate(_UpperCamelCase , attention_mask=_UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(_UpperCamelCase , attention_mask=_UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
_UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
_UpperCAmelCase = 2
_UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase )
_UpperCAmelCase = model.generate(_UpperCamelCase , attention_mask=_UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(_UpperCamelCase , attention_mask=_UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
_UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' )
_UpperCAmelCase = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
_UpperCAmelCase = '''Hello world'''
_UpperCAmelCase = tokenizer(_UpperCamelCase , return_tensors='''np''' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(_UpperCamelCase , '''do_samples''' ):
model.generate(_UpperCamelCase , do_samples=_UpperCamelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(_UpperCamelCase , '''foo''' ):
_UpperCAmelCase = {'''foo''': '''bar'''}
model.generate(_UpperCamelCase , **_UpperCamelCase )
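# The recurring pattern in the tests above, in isolation: generate eagerly,
# then under jax.jit, and require identical sequences. gpt2 ships Flax
# weights; routing generation settings through the config mirrors how these
# tests drive generate, though newer transformers releases prefer
# generation_config (a sketch, not a pinned recipe).
import jax
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM

tok = AutoTokenizer.from_pretrained("gpt2")
lm = FlaxAutoModelForCausalLM.from_pretrained("gpt2")
lm.config.max_length = 16
lm.config.do_sample = False

prompt_ids = tok("Hello world", return_tensors="np").input_ids
eager = lm.generate(prompt_ids).sequences
jitted = jax.jit(lm.generate)(prompt_ids).sequences
assert eager.tolist() == jitted.tolist()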
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
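# Greatly simplified sketch of what _LazyModule provides: attribute access
# triggers the real submodule import once, then caches the result on the
# module object so __getattr__ is not hit again.
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache for subsequent lookups
        return value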
UpperCAmelCase_ = {
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
10: "a",
11: "b",
12: "c",
13: "d",
14: "e",
15: "f",
}
def A__ ( SCREAMING_SNAKE_CASE_ : float ) -> str:
"""simple docstring"""
assert type(SCREAMING_SNAKE_CASE_ ) in (int, float) and SCREAMING_SNAKE_CASE_ == int(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = int(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = ''''''
_UpperCAmelCase = False
if decimal < 0:
_UpperCAmelCase = True
decimal *= -1
while decimal > 0:
_UpperCAmelCase , _UpperCAmelCase = divmod(SCREAMING_SNAKE_CASE_ , 16 )
_UpperCAmelCase = values[remainder] + hexadecimal
_UpperCAmelCase = '''0x''' + hexadecimal
if negative:
_UpperCAmelCase = '''-''' + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
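# Readable mirror of the converter above, checked against the built-in hex().
# Like the original, 0 maps to the bare prefix "0x", so it is left out below.
def _decimal_to_hexadecimal(decimal: int) -> str:
    assert isinstance(decimal, (int, float)) and decimal == int(decimal)
    decimal = int(decimal)
    negative = decimal < 0
    decimal = abs(decimal)
    hexadecimal = ""
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = "0123456789abcdef"[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    return "-" + hexadecimal if negative else hexadecimal

for value in (9, 10, 255, -42, 2023):
    assert _decimal_to_hexadecimal(value) == hex(value)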
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __UpperCamelCase ( A__ ):
__A : Any = """biogpt"""
def __init__( self , _UpperCamelCase=42384 , _UpperCamelCase=1024 , _UpperCamelCase=24 , _UpperCamelCase=16 , _UpperCamelCase=4096 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=1024 , _UpperCamelCase=0.02 , _UpperCamelCase=1e-12 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=2 , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = scale_embedding
_UpperCAmelCase = use_cache
_UpperCAmelCase = layerdrop
_UpperCAmelCase = activation_dropout
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
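# Instantiation sketch: the defaults mirror the signature above, and the
# public names in the library are BioGptConfig / BioGptModel (requires a
# transformers release that includes BioGPT).
from transformers import BioGptConfig, BioGptModel

biogpt_config = BioGptConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=128)
biogpt_model = BioGptModel(biogpt_config)
assert biogpt_config.vocab_size == 42384  # the default from the signature above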
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=A__ ):
__A : str = ["""torch""", """scipy"""]
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''torch''', '''scipy'''] )
from typing import List
from .keymap import KEYMAP, get_character
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
"""simple docstring"""
def decorator(SCREAMING_SNAKE_CASE_ : List[Any] ):
_UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] )
handle += [key]
setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ )
return func
return decorator
def A__ ( *SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
"""simple docstring"""
def decorator(SCREAMING_SNAKE_CASE_ : Any ):
_UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] )
handle += keys
setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ )
return func
return decorator
class __UpperCamelCase ( A__ ):
def __new__( cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = super().__new__(cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if not hasattr(_UpperCamelCase , '''key_handler''' ):
setattr(_UpperCamelCase , '''key_handler''' , {} )
setattr(_UpperCamelCase , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
_UpperCAmelCase = getattr(_UpperCamelCase , '''handle_key''' , [] )
for key in handled_keys:
_UpperCAmelCase = value
return new_cls
@staticmethod
def UpperCamelCase( cls ):
_UpperCAmelCase = get_character()
if char != KEYMAP["undefined"]:
_UpperCAmelCase = ord(_UpperCamelCase )
_UpperCAmelCase = cls.key_handler.get(_UpperCamelCase )
if handler:
_UpperCAmelCase = char
return handler(cls )
else:
return None
def A__ ( cls : Union[str, Any] ) -> Any:
"""simple docstring"""
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
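# Self-contained mini version of the key-marking decorator above; by intent
# the renamed A__ helpers are mark / mark_multiple, and the final one builds
# the dispatching class. All names below are reconstructions.
def mark_sketch(key):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle.append(key)
        func.handle_key = handle
        return func
    return decorator

class _Menu:
    @mark_sketch("j")
    def down(self):
        return "down"

_handlers = {
    key: member
    for member in vars(_Menu).values()
    for key in getattr(member, "handle_key", [])
}
assert _handlers["j"](_Menu()) == "down"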
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __UpperCamelCase ( A__ ):
def UpperCamelCase( self ):
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = 5
# Realm tok
_UpperCAmelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_UpperCAmelCase = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
_UpperCAmelCase = os.path.join(_UpperCamelCase , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_UpperCAmelCase = os.path.join(self.tmpdirname , '''realm_block_records''' )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
def UpperCamelCase( self ):
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
def UpperCamelCase( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase( self ):
_UpperCAmelCase = RealmConfig(num_block_records=self.num_block_records )
return config
def UpperCamelCase( self ):
_UpperCAmelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
def UpperCamelCase( self ):
_UpperCAmelCase = np.array(
[
B'''This is the first record''',
B'''This is the second record''',
B'''This is the third record''',
B'''This is the fourth record''',
B'''This is the fifth record''',
B'''This is a longer longer longer record''',
] , dtype=_UpperCamelCase , )
return block_records
def UpperCamelCase( self ):
_UpperCAmelCase = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def UpperCamelCase( self ):
_UpperCAmelCase = self.get_config()
_UpperCAmelCase = self.get_dummy_retriever()
_UpperCAmelCase = retriever.tokenizer
_UpperCAmelCase = np.array([0, 3] , dtype='''long''' )
_UpperCAmelCase = tokenizer(['''Test question'''] ).input_ids
_UpperCAmelCase = tokenizer(
['''the fourth'''] , add_special_tokens=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_attention_mask=_UpperCamelCase , ).input_ids
_UpperCAmelCase = config.reader_seq_len
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = retriever(
_UpperCamelCase , _UpperCamelCase , answer_ids=_UpperCamelCase , max_length=_UpperCamelCase , return_tensors='''np''' )
self.assertEqual(len(_UpperCamelCase ) , 2 )
self.assertEqual(len(_UpperCamelCase ) , 2 )
self.assertEqual(len(_UpperCamelCase ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
def UpperCamelCase( self ):
_UpperCAmelCase = self.get_config()
_UpperCAmelCase = self.get_dummy_retriever()
_UpperCAmelCase = retriever.tokenizer
_UpperCAmelCase = np.array([0, 3, 5] , dtype='''long''' )
_UpperCAmelCase = tokenizer(['''Test question'''] ).input_ids
_UpperCAmelCase = tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_attention_mask=_UpperCamelCase , ).input_ids
_UpperCAmelCase = config.reader_seq_len
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = retriever(
_UpperCamelCase , _UpperCamelCase , answer_ids=_UpperCamelCase , max_length=_UpperCamelCase , return_tensors='''np''' )
self.assertEqual([False, True, True] , _UpperCamelCase )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _UpperCamelCase )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
# Test local path
_UpperCAmelCase = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
self.assertEqual(retriever.block_records[0] , B'''This is the first record''' )
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
_UpperCAmelCase = os.path.join(
os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
_UpperCAmelCase = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
self.assertEqual(retriever.block_records[0] , B'''This is the first record''' )
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=24 , _UpperCamelCase=2 , _UpperCamelCase=6 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=None , _UpperCamelCase=1000 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
def UpperCamelCase( self ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase( self ):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = LiltModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LiltForTokenClassification(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = LiltForQuestionAnswering(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase( self ):
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( A__ , A__ , A__ , unittest.TestCase ):
__A : Dict = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__A : Optional[Any] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__A : List[Any] = False
__A : Optional[int] = False
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return True
def UpperCamelCase( self ):
_UpperCAmelCase = LiltModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 )
def UpperCamelCase( self ):
self.config_tester.run_common_tests()
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase )
@slow
def UpperCamelCase( self ):
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LiltModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@require_torch
@slow
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
_UpperCAmelCase = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(_UpperCamelCase )
_UpperCAmelCase = torch.tensor([[1, 2]] , device=_UpperCamelCase )
_UpperCAmelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(input_ids=_UpperCamelCase , bbox=_UpperCamelCase )
_UpperCAmelCase = torch.Size([1, 2, 768] )
_UpperCAmelCase = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=_UpperCamelCase , )
        self.assertEqual(outputs.last_hidden_state.shape , _UpperCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _UpperCamelCase , atol=1e-3 ) ) | 32 | 1 |
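# A minimal inference sketch mirroring the integration test above (assumes
# network access to the 'SCUT-DLVCLab/lilt-roberta-en-base' checkpoint):
#
#   import torch
#   from transformers import LiltModel
#
#   model = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base').eval()
#   input_ids = torch.tensor([[1, 2]])
#   bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])  # one layout box per token
#   with torch.no_grad():
#       hidden = model(input_ids=input_ids, bbox=bbox).last_hidden_state  # (1, 2, 768)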
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ = {
"configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
"tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BertForMaskedLM",
"BertForMultipleChoice",
"BertForNextSentencePrediction",
"BertForPreTraining",
"BertForQuestionAnswering",
"BertForSequenceClassification",
"BertForTokenClassification",
"BertLayer",
"BertLMHeadModel",
"BertModel",
"BertPreTrainedModel",
"load_tf_weights_in_bert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBertEmbeddings",
"TFBertForMaskedLM",
"TFBertForMultipleChoice",
"TFBertForNextSentencePrediction",
"TFBertForPreTraining",
"TFBertForQuestionAnswering",
"TFBertForSequenceClassification",
"TFBertForTokenClassification",
"TFBertLMHeadModel",
"TFBertMainLayer",
"TFBertModel",
"TFBertPreTrainedModel",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"FlaxBertForCausalLM",
"FlaxBertForMaskedLM",
"FlaxBertForMultipleChoice",
"FlaxBertForNextSentencePrediction",
"FlaxBertForPreTraining",
"FlaxBertForQuestionAnswering",
"FlaxBertForSequenceClassification",
"FlaxBertForTokenClassification",
"FlaxBertModel",
"FlaxBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 32 |
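# With the `_LazyModule` indirection above, importing the package stays cheap:
# heavy submodules load only when an attribute is first accessed. A usage
# sketch (assumes the standard `transformers` package layout):
#
#   from transformers.models.bert import BertConfig   # resolved lazily on access
#   config = BertConfig(hidden_size=256, num_attention_heads=4)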
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __UpperCamelCase ( A__ ):
__A : Tuple = """rwkv"""
__A : Any = {"""max_position_embeddings""": """context_length"""}
def __init__( self , _UpperCamelCase=50277 , _UpperCamelCase=1024 , _UpperCamelCase=4096 , _UpperCamelCase=32 , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=1e-5 , _UpperCamelCase=0 , _UpperCamelCase=0 , _UpperCamelCase=6 , _UpperCamelCase=False , _UpperCamelCase=True , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = context_length
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = attention_hidden_size if attention_hidden_size is not None else hidden_size
_UpperCAmelCase = intermediate_size if intermediate_size is not None else 4 * hidden_size
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = rescale_every
_UpperCAmelCase = use_cache
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = eos_token_id
super().__init__(
tie_word_embeddings=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) | 32 | 1 |
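# A quick sketch of the defaulting logic above via the public class (assumes a
# transformers release that ships RwkvConfig, i.e. >= 4.29):
#
#   from transformers import RwkvConfig
#   cfg = RwkvConfig(hidden_size=1024)
#   assert cfg.attention_hidden_size == cfg.hidden_size   # falls back to hidden_size
#   assert cfg.intermediate_size == 4 * cfg.hidden_size   # falls back to 4 * hidden_size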
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
UpperCAmelCase_ = logging.getLogger()
def A__ ( SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , '''all_results.json''' )
if os.path.exists(SCREAMING_SNAKE_CASE_ ):
with open(SCREAMING_SNAKE_CASE_ , '''r''' ) as f:
_UpperCAmelCase = json.load(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(F'''can\'t find {path}''' )
return results
UpperCAmelCase_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class __UpperCamelCase ( A__ ):
def UpperCamelCase( self ):
import xla_spawn
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = f'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(_UpperCamelCase , '''argv''' , _UpperCamelCase ):
_UpperCAmelCase = time()
xla_spawn.main()
_UpperCAmelCase = time()
_UpperCAmelCase = get_results(_UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def UpperCamelCase( self ):
import xla_spawn
_UpperCAmelCase = '''
./tests/test_trainer_tpu.py
--num_cores=8
./tests/test_trainer_tpu.py
'''.split()
with patch.object(_UpperCamelCase , '''argv''' , _UpperCamelCase ):
xla_spawn.main() | 32 |
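# For reference, the first test above roughly simulates this shell invocation
# (the xla_spawn.py path is an assumption based on the examples layout):
#
#   python examples/pytorch/xla_spawn.py --num_cores=8 \
#       examples/pytorch/text-classification/run_glue.py \
#       --model_name_or_path distilbert-base-uncased --do_train --do_eval ...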
def A__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> str:
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
_UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase = max(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 1 |
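# Quick sanity checks for the bitwise-AND helper above (`A__` is this file's
# mangled function name): 25 -> 0b11001 and 32 -> 0b100000 share no set bits,
# while 5 & 3 keeps only the lowest bit.
assert A__(25, 32) == "0b000000"
assert A__(5, 3) == "0b001"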
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def A__ ( ) -> Dict:
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
_UpperCAmelCase = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching , '''os.path.join''' , SCREAMING_SNAKE_CASE_ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def A__ ( ) -> Dict:
"""simple docstring"""
assert _test_patching.open is open
_UpperCAmelCase = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , '''open''' , SCREAMING_SNAKE_CASE_ ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def A__ ( ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = '''__test_patch_submodule_missing_mock__'''
with patch_submodule(_test_patching , '''pandas.read_csv''' , SCREAMING_SNAKE_CASE_ ):
pass
def A__ ( ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = '''__test_patch_submodule_missing_builtin_mock__'''
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , '''len''' , SCREAMING_SNAKE_CASE_ ) is None
with patch_submodule(_test_patching , '''len''' , SCREAMING_SNAKE_CASE_ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def A__ ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = '''__test_patch_submodule_start_and_stop_mock__'''
_UpperCAmelCase = patch_submodule(_test_patching , '''open''' , SCREAMING_SNAKE_CASE_ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def A__ ( ) -> Union[str, Any]:
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
_UpperCAmelCase = '''__test_patch_submodule_successive_join__'''
_UpperCAmelCase = '''__test_patch_submodule_successive_dirname__'''
_UpperCAmelCase = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , '''os.path.join''' , SCREAMING_SNAKE_CASE_ ):
with patch_submodule(_test_patching , '''os.rename''' , SCREAMING_SNAKE_CASE_ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , SCREAMING_SNAKE_CASE_ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , '''os.rename''' , SCREAMING_SNAKE_CASE_ ):
with patch_submodule(_test_patching , '''os.path.join''' , SCREAMING_SNAKE_CASE_ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , SCREAMING_SNAKE_CASE_ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def A__ ( ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = '''__test_patch_submodule_doesnt_exist_mock__'''
with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , SCREAMING_SNAKE_CASE_ ):
pass
with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , SCREAMING_SNAKE_CASE_ ):
pass | 32 |
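# Condensed shape of the behaviour verified throughout this file: the patch is
# applied on entry and fully rolled back on exit.
#
#   with patch_submodule(_test_patching, 'os.path.join', '<mock>'):
#       assert _test_patching.os.path.join == '<mock>'
#   # original attributes are restored once the context manager exits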
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class __UpperCamelCase ( A__ ):
__A : Dict = """falcon"""
__A : Any = ["""past_key_values"""]
def __init__( self , _UpperCamelCase=65024 , _UpperCamelCase=4544 , _UpperCamelCase=32 , _UpperCamelCase=71 , _UpperCamelCase=1e-5 , _UpperCamelCase=0.02 , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=11 , _UpperCamelCase=11 , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCAmelCase = kwargs.pop('''n_embed''' , _UpperCamelCase )
_UpperCAmelCase = hidden_size if n_embed is None else n_embed
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = use_cache
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = num_attention_heads if num_kv_heads is None else num_kv_heads
_UpperCAmelCase = alibi
_UpperCAmelCase = new_decoder_architecture
_UpperCAmelCase = multi_query # Ignored when new_decoder_architecture is True
_UpperCAmelCase = parallel_attn
_UpperCAmelCase = bias
super().__init__(bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
@property
def UpperCamelCase( self ):
return self.hidden_size // self.num_attention_heads
@property
def UpperCamelCase( self ):
return not self.alibi | 32 | 1 |
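# The derived properties above in action via the public class (assumes a
# transformers release that ships FalconConfig, i.e. >= 4.33; the property
# names follow the upstream class):
#
#   from transformers import FalconConfig
#   cfg = FalconConfig(hidden_size=4544, num_attention_heads=71, alibi=False)
#   assert cfg.head_dim == 4544 // 71   # hidden size per attention head
#   assert cfg.rotary                   # rotary is simply `not alibi`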
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"vocab_file": "spiece.model"}
UpperCAmelCase_ = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
UpperCAmelCase_ = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
UpperCAmelCase_ = "▁"
class __UpperCamelCase ( A__ ):
__A : Any = VOCAB_FILES_NAMES
__A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Tuple = ["""input_ids""", """attention_mask"""]
def __init__( self , _UpperCamelCase , _UpperCamelCase="</s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase=100 , _UpperCamelCase=None , _UpperCamelCase = None , _UpperCamelCase=True , **_UpperCamelCase , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_UpperCAmelCase = [f'''<extra_id_{i}>''' for i in range(_UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_UpperCAmelCase = len(set(filter(lambda _UpperCamelCase : bool('''extra_id''' in str(_UpperCamelCase ) ) , _UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
if legacy:
logger.warning_once(
                f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you'''
''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
_UpperCAmelCase = legacy
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , extra_ids=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=_UpperCamelCase , **_UpperCamelCase , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = extra_ids
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCamelCase )
@staticmethod
def UpperCamelCase( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_UpperCAmelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _UpperCamelCase , )
return max_model_length
@property
def UpperCamelCase( self ):
return self.sp_model.get_piece_size() + self._extra_ids
def UpperCamelCase( self ):
_UpperCAmelCase = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(_UpperCamelCase )) + [1]
return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
def UpperCamelCase( self ):
return list(
set(filter(lambda _UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , _UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) )
def UpperCamelCase( self ):
return [self._convert_token_to_id(_UpperCamelCase ) for token in self.get_sentinel_tokens()]
def UpperCamelCase( self , _UpperCamelCase ):
if len(_UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase )
if token_ids_a is None:
return token_ids_a
else:
_UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase )
return token_ids_a + token_ids_a
def __getstate__( self ):
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
return state
def __setstate__( self , _UpperCamelCase ):
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
_UpperCAmelCase = SPIECE_UNDERLINE + text.replace(_UpperCamelCase , ''' ''' )
return super().tokenize(_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ):
if not self.legacy:
_UpperCAmelCase = text.startswith(_UpperCamelCase )
if is_first:
_UpperCAmelCase = text[1:]
_UpperCAmelCase = self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(_UpperCamelCase ):
_UpperCAmelCase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def UpperCamelCase( self , _UpperCamelCase ):
if token.startswith('''<extra_id_''' ):
_UpperCAmelCase = re.match(R'''<extra_id_(\d+)>''' , _UpperCamelCase )
_UpperCAmelCase = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase ):
if index < self.sp_model.get_piece_size():
_UpperCAmelCase = self.sp_model.IdToPiece(_UpperCamelCase )
else:
_UpperCAmelCase = f'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = []
_UpperCAmelCase = ''''''
_UpperCAmelCase = False
for token in tokens:
            # make sure that special tokens are not decoded using the sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCamelCase ) + token
_UpperCAmelCase = True
_UpperCAmelCase = []
else:
current_sub_tokens.append(_UpperCamelCase )
_UpperCAmelCase = False
out_string += self.sp_model.decode(_UpperCamelCase )
return out_string.strip()
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
if not os.path.isdir(_UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase = os.path.join(
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , '''wb''' ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,) | 32 |
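# Minimal round-trip sketch with the public tokenizer (assumes network access
# to the 't5-small' checkpoint):
#
#   from transformers import T5Tokenizer
#   tok = T5Tokenizer.from_pretrained('t5-small')
#   ids = tok('Hello world').input_ids               # EOS is appended automatically
#   assert ids[-1] == tok.eos_token_id
#   assert tok.decode(ids, skip_special_tokens=True) == 'Hello world'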
from math import sqrt
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A__ ( SCREAMING_SNAKE_CASE_ : int = 1_00_01 ) -> int:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = 1
while count != nth and number < 3:
number += 1
if is_prime(SCREAMING_SNAKE_CASE_ ):
count += 1
while count != nth:
number += 2
if is_prime(SCREAMING_SNAKE_CASE_ ):
count += 1
return number
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 1 |
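# Sanity checks (left as comments, since the mangled helper names above do not
# resolve at runtime): the 6th prime is 13, and the published Project Euler
# problem 7 answer for the 10001st prime is 104743.
#
#   solution(6)   # -> 13
#   solution()    # -> 104743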
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def A__ ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
_UpperCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ).convert('''RGB''' )
_UpperCAmelCase = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3) , (0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1) ),
] )
_UpperCAmelCase = transform(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ).to(SCREAMING_SNAKE_CASE_ )
return image
def A__ ( SCREAMING_SNAKE_CASE_ : List[str] ) -> Tuple:
"""simple docstring"""
if "visual_encoder" in key:
_UpperCAmelCase = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , SCREAMING_SNAKE_CASE_ )
if "blocks" in key:
_UpperCAmelCase = re.sub(R'''blocks''' , '''layers''' , SCREAMING_SNAKE_CASE_ )
if "attn" in key:
_UpperCAmelCase = re.sub(R'''attn''' , '''self_attn''' , SCREAMING_SNAKE_CASE_ )
if "norm1" in key:
_UpperCAmelCase = re.sub(R'''norm1''' , '''layer_norm1''' , SCREAMING_SNAKE_CASE_ )
if "norm2" in key:
_UpperCAmelCase = re.sub(R'''norm2''' , '''layer_norm2''' , SCREAMING_SNAKE_CASE_ )
if "encoder.norm" in key:
_UpperCAmelCase = re.sub(R'''encoder.norm''' , '''post_layernorm''' , SCREAMING_SNAKE_CASE_ )
if "encoder.patch_embed.proj" in key:
_UpperCAmelCase = re.sub(R'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , SCREAMING_SNAKE_CASE_ )
if "encoder.pos_embed" in key:
_UpperCAmelCase = re.sub(R'''encoder.pos_embed''' , '''embeddings.position_embedding''' , SCREAMING_SNAKE_CASE_ )
if "encoder.cls_token" in key:
_UpperCAmelCase = re.sub(R'''encoder.cls_token''' , '''embeddings.class_embedding''' , SCREAMING_SNAKE_CASE_ )
if "self_attn" in key:
_UpperCAmelCase = re.sub(R'''self_attn.proj''' , '''self_attn.projection''' , SCREAMING_SNAKE_CASE_ )
return key
@torch.no_grad()
def A__ ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int=None ) -> Dict:
"""simple docstring"""
if config_path is not None:
_UpperCAmelCase = BlipConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
else:
_UpperCAmelCase = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} )
_UpperCAmelCase = BlipForConditionalGeneration(SCREAMING_SNAKE_CASE_ ).eval()
_UpperCAmelCase = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
_UpperCAmelCase = blip_decoder(pretrained=SCREAMING_SNAKE_CASE_ , image_size=3_84 , vit='''base''' )
_UpperCAmelCase = pt_model.eval()
_UpperCAmelCase = pt_model.state_dict()
for key in modified_state_dict.copy():
_UpperCAmelCase = modified_state_dict.pop(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = rename_key(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = value
hf_model.load_state_dict(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = 3_84
_UpperCAmelCase = load_demo_image(image_size=SCREAMING_SNAKE_CASE_ , device='''cpu''' )
_UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
_UpperCAmelCase = tokenizer(['''a picture of'''] ).input_ids
_UpperCAmelCase = hf_model.generate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
_UpperCAmelCase = hf_model.generate(SCREAMING_SNAKE_CASE_ )
assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
_UpperCAmelCase = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
_UpperCAmelCase = blip_vqa(pretrained=SCREAMING_SNAKE_CASE_ , image_size=SCREAMING_SNAKE_CASE_ , vit='''base''' )
vqa_model.eval()
_UpperCAmelCase = vqa_model.state_dict()
for key in modified_state_dict.copy():
_UpperCAmelCase = modified_state_dict.pop(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = rename_key(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = value
_UpperCAmelCase = BlipForQuestionAnswering(SCREAMING_SNAKE_CASE_ )
hf_vqa_model.load_state_dict(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = ['''How many dogs are in this image?''']
_UpperCAmelCase = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).input_ids
_UpperCAmelCase = hf_vqa_model.generate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
_UpperCAmelCase = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
_UpperCAmelCase = blip_itm(pretrained=SCREAMING_SNAKE_CASE_ , image_size=SCREAMING_SNAKE_CASE_ , vit='''base''' )
itm_model.eval()
_UpperCAmelCase = itm_model.state_dict()
for key in modified_state_dict.copy():
_UpperCAmelCase = modified_state_dict.pop(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = rename_key(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = value
_UpperCAmelCase = BlipForImageTextRetrieval(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = ['''A picture of a woman with a dog sitting in a beach''']
_UpperCAmelCase = tokenizer(
SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , padding='''max_length''' , truncation=SCREAMING_SNAKE_CASE_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(SCREAMING_SNAKE_CASE_ )
hf_itm_model.eval()
_UpperCAmelCase = hf_itm_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , use_itm_head=SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = hf_itm_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , use_itm_head=SCREAMING_SNAKE_CASE_ )
assert out[0].item() == 0.2_1_1_0_6_8_7_4_9_4_2_7_7_9_5_4
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5_6_9_8_8_4_5_3_8_6_5_0_5_1_2_7
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
UpperCAmelCase_ = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
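# Example invocation, as a sketch (the upstream filename
# convert_blip_original_pytorch_to_hf.py is an assumption; the BLIP source
# checkpoints are fetched from the URLs hardcoded above):
#
#   python convert_blip_original_pytorch_to_hf.py \
#       --pytorch_dump_folder_path ./blip-converted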
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = F'''Input value of [number={number}] must be an integer'''
raise TypeError(SCREAMING_SNAKE_CASE_ )
if number < 0:
return False
_UpperCAmelCase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 1 |
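# The routine above tests the automorphic property: n is automorphic when n^2
# ends in the digits of n. For example 76^2 = 5776 ends in 76 (True), while
# 7^2 = 49 does not end in 7 (False).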
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class __UpperCamelCase ( A__ ):
__A : str = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
__A : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
__A : ClassVar[Features] = Features({} )
__A : str = "text"
@property
def UpperCamelCase( self ):
return {self.text_column: "text"} | 32 | 1 |
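# Usage sketch via the public task-template API (shipped, and later
# deprecated, as `datasets.tasks.LanguageModeling`):
#
#   from datasets.tasks import LanguageModeling
#   template = LanguageModeling(text_column='content')
#   assert template.column_mapping == {'content': 'text'}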
import os
import pytest
from transformers.dynamic_module_utils import get_imports
UpperCAmelCase_ = "\nimport os\n"
UpperCAmelCase_ = "\ndef foo():\n import os\n return False\n"
UpperCAmelCase_ = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n"
UpperCAmelCase_ = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n"
UpperCAmelCase_ = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n"
UpperCAmelCase_ = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n"
UpperCAmelCase_ = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n"
UpperCAmelCase_ = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n"
UpperCAmelCase_ = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n"
UpperCAmelCase_ = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n"
UpperCAmelCase_ = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , SCREAMING_SNAKE_CASE_ )
def A__ ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , '''test_file.py''' )
with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as _tmp_file:
_tmp_file.write(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = get_imports(SCREAMING_SNAKE_CASE_ )
assert parsed_imports == ["os"] | 32 |
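# Each fixture above hides its third-party import behind try/except (or a
# nested scope), so `get_imports` is expected to surface only the
# unconditional top-level dependency -- hence every parametrized case
# reduces to ['os'].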
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"vocab_file": "spiece.model"}
UpperCAmelCase_ = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
UpperCAmelCase_ = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
UpperCAmelCase_ = "▁"
class __UpperCamelCase ( A__ ):
__A : Any = VOCAB_FILES_NAMES
__A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Tuple = ["""input_ids""", """attention_mask"""]
def __init__( self , _UpperCamelCase , _UpperCamelCase="</s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase=100 , _UpperCamelCase=None , _UpperCamelCase = None , _UpperCamelCase=True , **_UpperCamelCase , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_UpperCAmelCase = [f'''<extra_id_{i}>''' for i in range(_UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_UpperCAmelCase = len(set(filter(lambda _UpperCamelCase : bool('''extra_id''' in str(_UpperCamelCase ) ) , _UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
if legacy:
logger.warning_once(
f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
_UpperCAmelCase = legacy
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , extra_ids=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=_UpperCamelCase , **_UpperCamelCase , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = extra_ids
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCamelCase )
@staticmethod
def UpperCamelCase( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_UpperCAmelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _UpperCamelCase , )
return max_model_length
@property
def UpperCamelCase( self ):
return self.sp_model.get_piece_size() + self._extra_ids
def UpperCamelCase( self ):
_UpperCAmelCase = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(_UpperCamelCase )) + [1]
return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
def UpperCamelCase( self ):
return list(
set(filter(lambda _UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , _UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) )
def UpperCamelCase( self ):
return [self._convert_token_to_id(_UpperCamelCase ) for token in self.get_sentinel_tokens()]
def UpperCamelCase( self , _UpperCamelCase ):
if len(_UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
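# T5 does not use token type ids, so both the single-sequence and pair layouts
# get an all-zero mask (eos positions included).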
_UpperCAmelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase )
if token_ids_a is None:
return token_ids_a
else:
_UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase )
return token_ids_a + token_ids_a
def __getstate__( self ):
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
return state
def __setstate__( self , _UpperCamelCase ):
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
_UpperCAmelCase = SPIECE_UNDERLINE + text.replace(_UpperCamelCase , ''' ''' )
return super().tokenize(_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ):
if not self.legacy:
_UpperCAmelCase = text.startswith(_UpperCamelCase )
if is_first:
_UpperCAmelCase = text[1:]
_UpperCAmelCase = self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(_UpperCamelCase ):
_UpperCAmelCase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def UpperCamelCase( self , _UpperCamelCase ):
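# Sentinel tokens sit at the top of the vocabulary: <extra_id_0> maps to
# vocab_size - 1, <extra_id_1> to vocab_size - 2, and so on.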
if token.startswith('''<extra_id_''' ):
_UpperCAmelCase = re.match(R'''<extra_id_(\d+)>''' , _UpperCamelCase )
_UpperCAmelCase = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase ):
if index < self.sp_model.get_piece_size():
_UpperCAmelCase = self.sp_model.IdToPiece(_UpperCamelCase )
else:
_UpperCAmelCase = f'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def UpperCamelCase( self , _UpperCamelCase ):
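# Decode sentencepiece pieces in runs, flushing the accumulated run before each
# special token and emitting the special token itself verbatim.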
_UpperCAmelCase = []
_UpperCAmelCase = ''''''
_UpperCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCamelCase ) + token
_UpperCAmelCase = True
_UpperCAmelCase = []
else:
current_sub_tokens.append(_UpperCamelCase )
_UpperCAmelCase = False
out_string += self.sp_model.decode(_UpperCamelCase )
return out_string.strip()
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
if not os.path.isdir(_UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase = os.path.join(
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , '''wb''' ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,) | 32 | 1 |
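# Illustrative check of the sentinel-id arithmetic used by the tokenizer cell above
# (runnable on its own; the 32000/100 split is an assumption matching standard T5
# checkpoints, not something stored in this dataset row):
vocab_size = 32000 + 100
assert vocab_size - 0 - 1 == 32099   # index of <extra_id_0>
assert vocab_size - 99 - 1 == 32000  # index of <extra_id_99>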
from __future__ import annotations
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
_UpperCAmelCase = str(SCREAMING_SNAKE_CASE_ )
return len(SCREAMING_SNAKE_CASE_ ) == 9 and set(SCREAMING_SNAKE_CASE_ ) == set('''123456789''' )
def A__ ( ) -> int | None:
"""simple docstring"""
for base_num in range(99_99 , 49_99 , -1 ):
_UpperCAmelCase = 10_00_02 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
for base_num in range(3_33 , 99 , -1 ):
_UpperCAmelCase = 1_00_20_03 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
return None
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 1 |
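# Worked check for the search above (runnable; uses only the multipliers from the cell):
# for a 4-digit base n, 100002 * n concatenates n with 2 * n, and the maximal 1-9
# pandigital case is n = 9327.
assert 100002 * 9327 == int(str(9327) + str(2 * 9327)) == 932718654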
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]:
"""simple docstring"""
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
_UpperCAmelCase = model_type_to_module_name(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = importlib.import_module(F'''.{module_name}''' , '''transformers.models''' )
try:
return getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(SCREAMING_SNAKE_CASE_ , '''__name__''' , SCREAMING_SNAKE_CASE_ ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_UpperCAmelCase = importlib.import_module('''transformers''' )
if hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return None
def A__ ( SCREAMING_SNAKE_CASE_ : Union[str, os.PathLike] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, os.PathLike]] = None , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, str]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[bool, str]] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : bool = False , **SCREAMING_SNAKE_CASE_ : List[Any] , ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = get_file_from_repo(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , force_download=SCREAMING_SNAKE_CASE_ , resume_download=SCREAMING_SNAKE_CASE_ , proxies=SCREAMING_SNAKE_CASE_ , use_auth_token=SCREAMING_SNAKE_CASE_ , revision=SCREAMING_SNAKE_CASE_ , local_files_only=SCREAMING_SNAKE_CASE_ , )
if resolved_config_file is None:
logger.info(
'''Could not locate the image processor configuration file, will try to use the model config instead.''' )
return {}
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as reader:
return json.load(SCREAMING_SNAKE_CASE_ )
class __UpperCamelCase :
def __init__( self ):
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(_UpperCamelCase )
def UpperCamelCase( cls , _UpperCamelCase , **_UpperCamelCase ):
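# Resolution order: image_processor_type from the processor config, then its auto_map
# entry, then a feature-extractor fallback, and finally the model config and the
# IMAGE_PROCESSOR_MAPPING lookup at the end of this method.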
_UpperCAmelCase = kwargs.pop('''config''' , _UpperCamelCase )
_UpperCAmelCase = kwargs.pop('''trust_remote_code''' , _UpperCamelCase )
_UpperCAmelCase = True
_UpperCAmelCase , _UpperCAmelCase = ImageProcessingMixin.get_image_processor_dict(_UpperCamelCase , **_UpperCamelCase )
_UpperCAmelCase = config_dict.get('''image_processor_type''' , _UpperCamelCase )
_UpperCAmelCase = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
_UpperCAmelCase = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_UpperCAmelCase = config_dict.pop('''feature_extractor_type''' , _UpperCamelCase )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
_UpperCAmelCase = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
_UpperCAmelCase = config_dict['''auto_map''']['''AutoFeatureExtractor''']
_UpperCAmelCase = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = AutoConfig.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
# It could be in `config.image_processor_type`
_UpperCAmelCase = getattr(_UpperCamelCase , '''image_processor_type''' , _UpperCamelCase )
if hasattr(_UpperCamelCase , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
_UpperCAmelCase = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
_UpperCAmelCase = image_processor_class_from_name(_UpperCamelCase )
_UpperCAmelCase = image_processor_auto_map is not None
_UpperCAmelCase = image_processor_class is not None or type(_UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING
_UpperCAmelCase = resolve_trust_remote_code(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if has_remote_code and trust_remote_code:
_UpperCAmelCase = get_class_from_dynamic_module(
_UpperCamelCase , _UpperCamelCase , **_UpperCamelCase )
_UpperCAmelCase = kwargs.pop('''code_revision''' , _UpperCamelCase )
if os.path.isdir(_UpperCamelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_UpperCamelCase , **_UpperCamelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(_UpperCamelCase , **_UpperCamelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING:
_UpperCAmelCase = IMAGE_PROCESSOR_MAPPING[type(_UpperCamelCase )]
return image_processor_class.from_dict(_UpperCamelCase , **_UpperCamelCase )
raise ValueError(
f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def UpperCamelCase( _UpperCamelCase , _UpperCamelCase ):
IMAGE_PROCESSOR_MAPPING.register(_UpperCamelCase , _UpperCamelCase ) | 32 |
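# Usage sketch for the class above (commented out because it needs network access;
# the checkpoint name is only an example):
# from transformers import AutoImageProcessor
# processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
# type(processor).__name__  # -> "ViTImageProcessor", per the mapping above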
import numpy as np
def A__ ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : float ) -> np.ndarray:
"""simple docstring"""
return np.where(vector > 0 , SCREAMING_SNAKE_CASE_ , (alpha * (np.exp(SCREAMING_SNAKE_CASE_ ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 1 |
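# Quick numeric check of the ELU above with alpha = 1.0 (assumes it runs in the same
# module as the cell, where the function is bound to the name A__):
# A__(np.array([-1.0, 0.0, 2.0]), 1.0) -> approximately [-0.6321, 0.0, 2.0]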