Dataset columns: code (string, 87 to 55.2k chars) | code_codestyle (int64, 0 to 349) | style_context (string, 135 to 49.1k chars) | style_context_codestyle (int64, 0 to 349) | label (int64, 0 or 1)
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` "
                "before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name, log_level=None):
    """simple docstring"""
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", log_level)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
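# Usage sketch (public `accelerate` API; `Accelerator()` initializes the shared
# state that `log` checks for):
#
#     from accelerate import Accelerator
#     from accelerate.logging import get_logger
#
#     accelerator = Accelerator()
#     logger = get_logger(__name__, log_level="INFO")
#     logger.info("printed on the main process only")
#     logger.info("printed on every process", main_process_only=False)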
# --- code_codestyle: 337 (style_context sample follows) ---
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__a = logging.get_logger(__name__)
__a = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = 'perceiver'
def __init__( self , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=1280 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=26 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="kv" , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=262 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=56 , SCREAMING_SNAKE_CASE__=[368, 496] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=1920 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=[1, 16, 224, 224] , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : Any = num_latents
lowercase : Union[str, Any] = d_latents
lowercase : str = d_model
lowercase : int = num_blocks
lowercase : str = num_self_attends_per_block
lowercase : List[str] = num_self_attention_heads
lowercase : List[str] = num_cross_attention_heads
lowercase : int = qk_channels
lowercase : List[Any] = v_channels
lowercase : int = cross_attention_shape_for_attention
lowercase : Tuple = self_attention_widening_factor
lowercase : Dict = cross_attention_widening_factor
lowercase : Any = hidden_act
lowercase : Optional[Any] = attention_probs_dropout_prob
lowercase : Union[str, Any] = initializer_range
lowercase : Any = layer_norm_eps
lowercase : Any = use_query_residual
# masked language modeling attributes
lowercase : List[str] = vocab_size
lowercase : Dict = max_position_embeddings
# image classification attributes
lowercase : int = image_size
# flow attributes
lowercase : List[Any] = train_size
# multimodal autoencoding attributes
lowercase : List[Any] = num_frames
lowercase : Union[str, Any] = audio_samples_per_frame
lowercase : int = samples_per_patch
lowercase : Optional[int] = output_shape
class __SCREAMING_SNAKE_CASE ( A__ ):
@property
def __lowerCamelCase ( self ):
if self.task == "multiple-choice":
lowercase : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase : Dict = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''inputs''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
@property
def __lowerCamelCase ( self ):
return 1E-4
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 3 , SCREAMING_SNAKE_CASE__ = 40 , SCREAMING_SNAKE_CASE__ = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase : str = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase : Union[str, Any] = preprocessor.num_special_tokens_to_add(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE__ )
# Generate dummy inputs according to compute batch and sequence
lowercase : Optional[Any] = [''' '''.join(['''a'''] ) * seq_length] * batch_size
lowercase : Any = dict(preprocessor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) )
lowercase : Union[str, Any] = inputs.pop('''input_ids''' )
return inputs
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase : List[str] = compute_effective_axis_dimension(SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch )
lowercase : List[str] = self._generate_dummy_images(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = dict(preprocessor(images=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) )
lowercase : Union[str, Any] = inputs.pop('''pixel_values''' )
return inputs
else:
raise ValueError(
'''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
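# Usage sketch for the config above (standard `transformers` API; argument
# values are illustrative):
#
#     from transformers import PerceiverConfig, PerceiverModel
#
#     config = PerceiverConfig(num_latents=256, d_latents=1280)
#     model = PerceiverModel(config)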
# --- style_context_codestyle: 337 | label: 1 (next row follows) ---
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
import bs4
from bs4 import BeautifulSoup
__a = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , **SCREAMING_SNAKE_CASE__ ):
requires_backends(self , ['''bs4'''] )
super().__init__(**SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = []
lowercase : Tuple = []
lowercase : Tuple = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
lowercase : str = parent.find_all(child.name , recursive=SCREAMING_SNAKE_CASE__ )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(SCREAMING_SNAKE_CASE__ ) else next(i for i, s in enumerate(SCREAMING_SNAKE_CASE__ , 1 ) if s is child ) )
lowercase : List[Any] = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : int = BeautifulSoup(SCREAMING_SNAKE_CASE__ , '''html.parser''' )
lowercase : Union[str, Any] = []
lowercase : List[str] = []
lowercase : Optional[Any] = []
for element in html_code.descendants:
if type(element ) == bs4.element.NavigableString:
if type(element.parent ) != bs4.element.Tag:
continue
lowercase : List[Any] = html.unescape(SCREAMING_SNAKE_CASE__ ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(SCREAMING_SNAKE_CASE__ )
lowercase , lowercase : Tuple = self.xpath_soup(SCREAMING_SNAKE_CASE__ )
stringaxtag_seq.append(SCREAMING_SNAKE_CASE__ )
stringaxsubs_seq.append(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
raise ValueError('''Number of doc strings and xtags does not correspond''' )
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
raise ValueError('''Number of doc strings and xsubs does not correspond''' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = ''''''
for tagname, subs in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
xpath += f"""/{tagname}"""
if subs != 0:
xpath += f"""[{subs}]"""
return xpath
def __call__( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Union[str, Any] = False
# Check that strings has a valid type
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = True
elif isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ):
if len(SCREAMING_SNAKE_CASE__ ) == 0 or isinstance(html_strings[0] , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = True
if not valid_strings:
raise ValueError(
'''HTML strings must of type `str`, `List[str]` (batch of examples), '''
f"""but is of type {type(SCREAMING_SNAKE_CASE__ )}.""" )
lowercase : Optional[int] = bool(isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ) and (isinstance(html_strings[0] , SCREAMING_SNAKE_CASE__ )) )
if not is_batched:
lowercase : Optional[int] = [html_strings]
# Get nodes + xpaths
lowercase : List[str] = []
lowercase : Tuple = []
for html_string in html_strings:
lowercase , lowercase , lowercase : Optional[Any] = self.get_three_from_single(SCREAMING_SNAKE_CASE__ )
nodes.append(SCREAMING_SNAKE_CASE__ )
lowercase : Any = []
for node, tag_list, sub_list in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = self.construct_xpath(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
xpath_strings.append(SCREAMING_SNAKE_CASE__ )
xpaths.append(SCREAMING_SNAKE_CASE__ )
# return as Dict
lowercase : Optional[Any] = {'''nodes''': nodes, '''xpaths''': xpaths}
lowercase : List[str] = BatchFeature(data=SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
return encoded_inputs
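# Usage sketch (this sample appears to be a renamed copy of transformers'
# MarkupLMFeatureExtractor; requires `bs4`):
#
#     from transformers import MarkupLMFeatureExtractor
#
#     feature_extractor = MarkupLMFeatureExtractor()
#     html = "<html><body><h1>Title</h1><p>Hello world</p></body></html>"
#     encoding = feature_extractor(html)
#     # encoding["nodes"]  -> [["Title", "Hello world"]]
#     # encoding["xpaths"] -> [["/html/body/h1", "/html/body/p"]]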
# --- code_codestyle: 337 (style_context sample follows) ---
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    """simple docstring"""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    """simple docstring"""
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    """simple docstring"""
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    """simple docstring"""
    pass  # Put your code here...


def random_letters(chars_incl, i):
    """simple docstring"""
    pass  # Put your code here...


def random_characters(chars_incl, i):
    """simple docstring"""
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    """simple docstring"""
    if len(password) < min_length:
        # Your password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char


# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def main():
    """simple docstring"""
    max_length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(max_length))
    print("Alternative Password generated:", alternative_password_generator(chars_incl, max_length))
    print("[If you are thinking of using this password, you had better save it.]")


if __name__ == "__main__":
    main()
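# Minimal usage sketch for the functions above (illustrative values; not called
# at import time):
def _demo_passwords() -> None:
    # password_generator draws from letters, digits, and punctuation.
    assert len(password_generator(12)) == 12
    # Strong: at least 8 chars with upper, lower, digit, and punctuation.
    assert is_strong_password("Aa1!aaaa")
    assert not is_strong_password("weak")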
# --- style_context_codestyle: 337 | label: 1 (next row follows) ---
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def __lowercase ( ) ->List[str]:
"""simple docstring"""
lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--model_ckpt''', type=_UpperCamelCase, default='''microsoft/unixcoder-base-nine''' )
parser.add_argument('''--num_epochs''', type=_UpperCamelCase, default=5 )
parser.add_argument('''--batch_size''', type=_UpperCamelCase, default=6 )
parser.add_argument('''--gradient_accumulation_steps''', type=_UpperCamelCase, default=1 )
parser.add_argument('''--freeze''', type=_UpperCamelCase, default=_UpperCamelCase )
parser.add_argument('''--learning_rate''', type=_UpperCamelCase, default=5e-4 )
parser.add_argument('''--seed''', type=_UpperCamelCase, default=0 )
parser.add_argument('''--lr_scheduler_type''', type=_UpperCamelCase, default='''cosine''' )
parser.add_argument('''--num_warmup_steps''', type=_UpperCamelCase, default=10 )
parser.add_argument('''--weight_decay''', type=_UpperCamelCase, default=0.0_1 )
parser.add_argument('''--output_dir''', type=_UpperCamelCase, default='''./results''' )
return parser.parse_args()
__a = load('''accuracy''')
def __lowercase ( _UpperCamelCase ) ->Dict:
"""simple docstring"""
lowercase , lowercase : Tuple = eval_pred
lowercase : Tuple = np.argmax(_UpperCamelCase, axis=1 )
return metric.compute(predictions=_UpperCamelCase, references=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , SCREAMING_SNAKE_CASE__ ):
super().__init__()
lowercase : List[str] = trainer
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
if control.should_evaluate:
lowercase : Dict = deepcopy(SCREAMING_SNAKE_CASE__ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' )
return control_copy
def __lowercase ( ) ->Dict:
"""simple docstring"""
lowercase : Tuple = get_args()
set_seed(args.seed )
lowercase : Union[str, Any] = load_dataset('''codeparrot/codecomplex''', split='''train''' )
lowercase : Any = dataset.train_test_split(test_size=0.2 )
lowercase : int = train_test['''test'''].train_test_split(test_size=0.5 )
lowercase : Optional[Any] = DatasetDict(
{
'''train''': train_test['''train'''],
'''test''': test_validation['''train'''],
'''valid''': test_validation['''test'''],
} )
print('''Loading tokenizer and model''' )
lowercase : str = AutoTokenizer.from_pretrained(args.model_ckpt )
lowercase : Optional[Any] = tokenizer.eos_token
lowercase : Any = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 )
lowercase : List[str] = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
lowercase : Dict = False
lowercase : Optional[Any] = ClassLabel(num_classes=7, names=list(set(train_test_validation['''train''']['''complexity'''] ) ) )
def tokenize(_UpperCamelCase ):
lowercase : List[str] = tokenizer(example['''src'''], truncation=_UpperCamelCase, max_length=1024 )
lowercase : Any = labels.str2int(example['''complexity'''] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
lowercase : List[str] = train_test_validation.map(
_UpperCamelCase, batched=_UpperCamelCase, remove_columns=train_test_validation['''train'''].column_names, )
lowercase : Optional[Any] = DataCollatorWithPadding(tokenizer=_UpperCamelCase )
lowercase : str = TrainingArguments(
output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy='''epoch''', save_strategy='''epoch''', logging_strategy='''epoch''', per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.0_1, metric_for_best_model='''accuracy''', run_name='''complexity-java''', report_to='''wandb''', )
lowercase : Optional[Any] = Trainer(
model=_UpperCamelCase, args=_UpperCamelCase, train_dataset=tokenized_datasets['''train'''], eval_dataset=tokenized_datasets['''valid'''], tokenizer=_UpperCamelCase, data_collator=_UpperCamelCase, compute_metrics=_UpperCamelCase, )
print('''Training...''' )
trainer.add_callback(CustomCallback(_UpperCamelCase ) )
trainer.train()
if __name__ == "__main__":
main()
# --- code_codestyle: 337 (style_context sample follows) ---
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """simple docstring"""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """simple docstring"""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """simple docstring"""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
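# Minimal check for the solver above (4x4 has exactly two solutions; each is
# printed by `solve` as it is found, since the stored board is later mutated
# during backtracking):
def _demo_four_queens() -> None:
    small = [[0] * 4 for _ in range(4)]
    solution.clear()
    solve(small, 0)
    assert len(solution) == 2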
# --- style_context_codestyle: 337 | label: 1 (next row follows) ---
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def __lowercase ( _UpperCamelCase ) ->Optional[int]:
"""simple docstring"""
lowercase : Optional[int] = np.inf
def set_batch_size(_UpperCamelCase ) -> None:
nonlocal batch_size
if isinstance(_UpperCamelCase, _UpperCamelCase ):
lowercase : Tuple = min(_UpperCamelCase, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(_UpperCamelCase, _UpperCamelCase ):
lowercase : str = min(_UpperCamelCase, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(_UpperCamelCase, _UpperCamelCase ) and feature.dtype == "binary":
lowercase : List[str] = min(_UpperCamelCase, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(_UpperCamelCase, _UpperCamelCase )
return None if batch_size is np.inf else batch_size
class __SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(
SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ , streaming=SCREAMING_SNAKE_CASE__ , num_proc=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowercase : Any = path_or_paths if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else {self.split: path_or_paths}
lowercase : List[Any] = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
lowercase : Optional[int] = Parquet(
cache_dir=SCREAMING_SNAKE_CASE__ , data_files=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , hash=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
def __lowerCamelCase ( self ):
# Build iterable dataset
if self.streaming:
lowercase : Optional[int] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase : Union[str, Any] = None
lowercase : Tuple = None
lowercase : Dict = None
lowercase : Any = None
self.builder.download_and_prepare(
download_config=SCREAMING_SNAKE_CASE__ , download_mode=SCREAMING_SNAKE_CASE__ , verification_mode=SCREAMING_SNAKE_CASE__ , base_path=SCREAMING_SNAKE_CASE__ , num_proc=self.num_proc , )
lowercase : Any = self.builder.as_dataset(
split=self.split , verification_mode=SCREAMING_SNAKE_CASE__ , in_memory=self.keep_in_memory )
return dataset
class __SCREAMING_SNAKE_CASE :
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
lowercase : str = dataset
lowercase : Dict = path_or_buf
lowercase : Optional[Any] = batch_size or get_writer_batch_size(dataset.features )
lowercase : int = parquet_writer_kwargs
def __lowerCamelCase ( self ):
lowercase : Any = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , '''wb+''' ) as buffer:
lowercase : Optional[Any] = self._write(file_obj=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , **self.parquet_writer_kwargs )
else:
lowercase : Any = self._write(file_obj=self.path_or_buf , batch_size=SCREAMING_SNAKE_CASE__ , **self.parquet_writer_kwargs )
return written
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : Union[str, Any] = 0
lowercase : Optional[int] = parquet_writer_kwargs.pop('''path_or_buf''' , SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = self.dataset.features.arrow_schema
lowercase : List[Any] = pq.ParquetWriter(SCREAMING_SNAKE_CASE__ , schema=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , SCREAMING_SNAKE_CASE__ ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
lowercase : Dict = query_table(
table=self.dataset._data , key=slice(SCREAMING_SNAKE_CASE__ , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(SCREAMING_SNAKE_CASE__ )
written += batch.nbytes
writer.close()
return written
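# Usage sketch (the public `datasets` API that wraps these reader/writer
# classes):
#
#     from datasets import Dataset, load_dataset
#
#     ds = Dataset.from_dict({"a": [1, 2, 3]})
#     ds.to_parquet("data.parquet")
#     ds2 = load_dataset("parquet", data_files="data.parquet", split="train")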
# --- code_codestyle: 337 (style_context sample follows) ---
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# --- style_context_codestyle: 337 | label: 1 (next row follows) ---
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
__a = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def __lowercase ( _UpperCamelCase=True ) ->List[str]:
"""simple docstring"""
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=A__ ) )
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Optional[int] = None
A : Any = None
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
with TemporaryDirectory() as tmp_dir:
lowercase : Union[str, Any] = dataset_module_factory(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = import_main_class(dataset_module.module_path , dataset=SCREAMING_SNAKE_CASE__ )
lowercase : DatasetBuilder = builder_cls(
cache_dir=SCREAMING_SNAKE_CASE__ , config_name=SCREAMING_SNAKE_CASE__ , hash=dataset_module.hash , )
lowercase : List[str] = '''/'''.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=SCREAMING_SNAKE_CASE__ ).replace(os.sep , '''/''' ),
config.DATASET_INFO_FILENAME,
] )
lowercase : List[Any] = cached_path(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ )
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE__ ) )
@pytest.mark.integration
def __lowercase ( _UpperCamelCase ) ->Optional[int]:
"""simple docstring"""
lowercase : List[str] = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple'''
lowercase : Dict = dataset_module_factory('''wikipedia''', cache_dir=_UpperCamelCase )
lowercase : str = import_main_class(dataset_module.module_path )
lowercase : DatasetBuilder = builder_cls(
cache_dir=_UpperCamelCase, config_name='''20220301.frr''', hash=dataset_module.hash, )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
lowercase : Tuple = None
builder_instance.download_and_prepare()
lowercase : int = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def __lowercase ( _UpperCamelCase ) ->str:
"""simple docstring"""
lowercase : Optional[int] = dataset_module_factory('''wikipedia''', cache_dir=_UpperCamelCase )
lowercase : Any = import_main_class(dataset_module.module_path, dataset=_UpperCamelCase )
lowercase : DatasetBuilder = builder_cls(
cache_dir=_UpperCamelCase, config_name='''20220301.frr''', hash=dataset_module.hash, )
lowercase : int = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_UpperCamelCase, _UpperCamelCase )
assert "train" in ds
assert isinstance(ds['''train'''], _UpperCamelCase )
assert next(iter(ds['''train'''] ) )
# --- code_codestyle: 337 (style_context sample follows) ---
from collections.abc import Callable


class Heap:
    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent

    def _heapify_up(self, index: int) -> None:
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change - so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value) -> None:
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        return self.arr[0] if self.size else None

    def extract_top(self):
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
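# Minimal usage sketch for the Heap above (the default identity key makes this
# a max-heap on the item value):
def _demo_heap() -> None:
    heap = Heap()
    heap.insert_item("a", 3)
    heap.insert_item("b", 10)
    heap.insert_item("c", 7)
    assert heap.get_top() == ["b", 10]
    heap.update_item("b", 1)
    assert heap.extract_top() == ["c", 7]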
# --- style_context_codestyle: 337 | label: 1 (next row follows) ---
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
Wav2Vec2FeatureExtractor,
Wav2Vec2PhonemeCTCTokenizer,
Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
__a = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
__a = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Dict:
"""simple docstring"""
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
lowercase : Tuple = '''lm_head'''
lowercase : Optional[int] = getattr(_UpperCamelCase, _UpperCamelCase )
if weight_type is not None:
lowercase : Any = getattr(_UpperCamelCase, _UpperCamelCase ).shape
else:
lowercase : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowercase : Any = value
elif weight_type == "weight_g":
lowercase : List[Any] = value
elif weight_type == "weight_v":
lowercase : List[Any] = value
elif weight_type == "bias":
lowercase : int = value
else:
lowercase : str = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->List[str]:
"""simple docstring"""
lowercase : int = []
lowercase : Optional[int] = fairseq_model.state_dict()
lowercase : str = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
lowercase : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, hf_model.config.feat_extract_norm == '''group''', )
lowercase : str = True
else:
for key, mapped_key in MAPPING.items():
lowercase : Optional[int] = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowercase : Dict = True
if "*" in mapped_key:
lowercase : Dict = name.split(_UpperCamelCase )[0].split('''.''' )[-2]
lowercase : List[Any] = mapped_key.replace('''*''', _UpperCamelCase )
if "weight_g" in name:
lowercase : str = '''weight_g'''
elif "weight_v" in name:
lowercase : str = '''weight_v'''
elif "bias" in name:
lowercase : List[Any] = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase : Optional[Any] = '''weight'''
else:
lowercase : Any = None
set_recursively(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Tuple:
"""simple docstring"""
lowercase : Optional[int] = full_name.split('''conv_layers.''' )[-1]
lowercase : Optional[Any] = name.split('''.''' )
lowercase : Optional[Any] = int(items[0] )
lowercase : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowercase : int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowercase : Optional[Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowercase : List[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowercase : Optional[int] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=None, _UpperCamelCase=None, _UpperCamelCase=True ) ->List[str]:
"""simple docstring"""
if config_path is not None:
lowercase : Optional[Any] = UniSpeechConfig.from_pretrained(_UpperCamelCase )
else:
lowercase : str = UniSpeechConfig()
if is_finetuned:
if dict_path:
lowercase : Tuple = Dictionary.load_from_json(_UpperCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowercase : Dict = target_dict.pad_index
lowercase : int = target_dict.bos_index
lowercase : Tuple = target_dict.eos_index
lowercase : Tuple = len(target_dict.symbols )
lowercase : Dict = os.path.join(_UpperCamelCase, '''vocab.json''' )
if not os.path.isdir(_UpperCamelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_UpperCamelCase ) )
return
os.makedirs(_UpperCamelCase, exist_ok=_UpperCamelCase )
lowercase : Tuple = target_dict.indices
# fairseq has the <pad> and <s> switched
lowercase : int = 42
lowercase : Any = 43
with open(_UpperCamelCase, '''w''', encoding='''utf-8''' ) as vocab_handle:
json.dump(_UpperCamelCase, _UpperCamelCase )
lowercase : Any = Wav2Vec2PhonemeCTCTokenizer(
_UpperCamelCase, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='''|''', do_lower_case=_UpperCamelCase, )
lowercase : List[Any] = True if config.feat_extract_norm == '''layer''' else False
lowercase : int = Wav2Vec2FeatureExtractor(
feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=_UpperCamelCase, return_attention_mask=_UpperCamelCase, )
lowercase : str = Wav2Vec2Processor(feature_extractor=_UpperCamelCase, tokenizer=_UpperCamelCase )
processor.save_pretrained(_UpperCamelCase )
lowercase : int = UniSpeechForCTC(_UpperCamelCase )
else:
lowercase : Tuple = UniSpeechForPreTraining(_UpperCamelCase )
if is_finetuned:
lowercase , lowercase , lowercase : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
lowercase , lowercase , lowercase : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
lowercase : List[str] = model[0].eval()
recursively_load_weights(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
hf_unispeech.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__a = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
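# Example invocation (flags as defined by the argparser above; the script name,
# file names, and paths are placeholders):
#
#     python convert_unispeech_checkpoint.py \
#         --checkpoint_path /path/to/unispeech.pt \
#         --pytorch_dump_folder_path ./unispeech-hf \
#         --dict_path /path/to/dict.ltr.txt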
# --- code_codestyle: 337 (style_context sample follows) ---
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
# --- style_context_codestyle: 337 | label: 1 (next row follows) ---
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__a = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( A__ ):
A : str = ['pixel_values']
def __init__( self , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = 1 / 255 , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = IMAGENET_DEFAULT_MEAN , SCREAMING_SNAKE_CASE__ = IMAGENET_DEFAULT_STD , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : str = size if size is not None else {'''shortest_edge''': 224}
lowercase : str = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ , param_name='''crop_size''' )
lowercase : List[str] = do_resize
lowercase : Optional[Any] = size
lowercase : Any = resample
lowercase : Union[str, Any] = do_center_crop
lowercase : List[Any] = crop_size
lowercase : Any = do_rescale
lowercase : str = rescale_factor
lowercase : Any = do_normalize
lowercase : int = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowercase : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
lowercase : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
lowercase : List[str] = int((256 / 224) * size['''shortest_edge'''] )
lowercase : Dict = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
lowercase : int = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
SCREAMING_SNAKE_CASE__ , size=(size_dict['''height'''], size_dict['''width''']) , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
lowercase : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(SCREAMING_SNAKE_CASE__ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
return rescale(SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
return normalize(SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ , ):
lowercase : int = do_resize if do_resize is not None else self.do_resize
lowercase : Union[str, Any] = resample if resample is not None else self.resample
lowercase : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowercase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
lowercase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
lowercase : List[Any] = image_std if image_std is not None else self.image_std
lowercase : str = size if size is not None else self.size
lowercase : str = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = crop_size if crop_size is not None else self.crop_size
lowercase : int = get_size_dict(SCREAMING_SNAKE_CASE__ , param_name='''crop_size''' )
lowercase : Optional[Any] = make_list_of_images(SCREAMING_SNAKE_CASE__ )
if not valid_images(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase : Dict = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images]
if do_resize:
lowercase : Dict = [self.resize(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
if do_center_crop:
lowercase : Tuple = [self.center_crop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
if do_rescale:
lowercase : str = [self.rescale(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
if do_normalize:
lowercase : List[str] = [self.normalize(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
lowercase : str = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
lowercase : List[Any] = {'''pixel_values''': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
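# Usage sketch (the resize/crop/normalize pipeline above matches transformers'
# LeViT-style image processors; the concrete class name is an assumption):
#
#     import numpy as np
#     from PIL import Image
#     from transformers import LevitImageProcessor
#
#     processor = LevitImageProcessor()
#     image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
#     batch = processor(image, return_tensors="np")
#     # batch["pixel_values"].shape -> (1, 3, 224, 224)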
# --- code_codestyle: 337 (style_context sample follows) ---
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__a = logging.get_logger(__name__)
__a = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__a = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
__a = {'''facebook/blenderbot-3B''': 1_28}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Dict = VOCAB_FILES_NAMES
A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Optional[int] = ['input_ids', 'attention_mask']
A : str = BlenderbotTokenizer
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="replace" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowercase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
lowercase : List[Any] = getattr(SCREAMING_SNAKE_CASE__ , pre_tok_state.pop('''type''' ) )
lowercase : str = add_prefix_space
lowercase : List[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = add_prefix_space
lowercase : str = '''post_processor'''
lowercase : str = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if tokenizer_component_instance:
lowercase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase : Tuple = tuple(state['''sep'''] )
if "cls" in state:
lowercase : Union[str, Any] = tuple(state['''cls'''] )
lowercase : Optional[int] = False
if state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
lowercase : Any = add_prefix_space
lowercase : Tuple = True
if state.get('''trim_offsets''' , SCREAMING_SNAKE_CASE__ ) != trim_offsets:
lowercase : List[str] = trim_offsets
lowercase : Optional[int] = True
if changes_to_apply:
lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ , state.pop('''type''' ) )
lowercase : Union[str, Any] = component_class(**SCREAMING_SNAKE_CASE__ )
setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __lowerCamelCase ( self ):
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else value
lowercase : Any = value
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : Any = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : int = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : Tuple = [self.sep_token_id]
lowercase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
return token_ids_a + [self.eos_token_id]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = []
for is_user, text in conversation.iter_texts():
if is_user:
                # Prefix user turns with a space, matching how Blenderbot formats its inputs
inputs.append(''' ''' + text )
else:
                # Generated responses already contain the leading space.
inputs.append(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = ''' '''.join(SCREAMING_SNAKE_CASE__ )
lowercase : Any = self.encode(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > self.model_max_length:
lowercase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
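# A minimal usage sketch, kept as comments. The class name, model id, and the
# private conversation-builder method are assumptions based on the upstream
# Blenderbot fast tokenizer, not defined in this cell:
#
#   tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-400M-distill")
#   input_ids = tokenizer._build_conversation_input_ids(conversation)
#   assert len(input_ids) <= tokenizer.model_max_length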
| 337 | 1 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def __lowerCamelCase ( *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
pass
@is_pipeline_test
@require_vision
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
A : str = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[Any] = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
lowercase : List[str] = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[Any] = object_detector(examples[0] , threshold=0.0 )
lowercase : str = len(SCREAMING_SNAKE_CASE__ )
self.assertGreater(SCREAMING_SNAKE_CASE__ , 0 )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , [
{
'''score''': ANY(SCREAMING_SNAKE_CASE__ ),
'''label''': ANY(SCREAMING_SNAKE_CASE__ ),
'''box''': {'''xmin''': ANY(SCREAMING_SNAKE_CASE__ ), '''ymin''': ANY(SCREAMING_SNAKE_CASE__ ), '''xmax''': ANY(SCREAMING_SNAKE_CASE__ ), '''ymax''': ANY(SCREAMING_SNAKE_CASE__ )},
}
for i in range(SCREAMING_SNAKE_CASE__ )
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def __lowerCamelCase ( self ):
pass
@require_torch
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
lowercase : Dict = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=4 ) , [
{'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
] , )
lowercase : Any = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=4 ) , [
[
{'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
]
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
lowercase : List[Any] = pipeline('''zero-shot-object-detection''' )
lowercase : int = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
] , )
lowercase : Any = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=4 ) , [
[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def __lowerCamelCase ( self ):
pass
@require_torch
@slow
def __lowerCamelCase ( self ):
lowercase : Dict = 0.2
lowercase : Tuple = pipeline('''zero-shot-object-detection''' )
lowercase : Dict = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=SCREAMING_SNAKE_CASE__ , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
lowercase : Optional[int] = 2
lowercase : Dict = pipeline('''zero-shot-object-detection''' )
lowercase : List[Any] = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=SCREAMING_SNAKE_CASE__ , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
] , )
| 337 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main() -> None:
    """Run the TensorFlow benchmark, turning deprecated `--no_*` flags into a helpful error."""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        deprecated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in deprecated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
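# Example invocation as a comment sketch. The flag names follow
# TensorFlowBenchmarkArguments; the model id is illustrative:
#
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128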
| 337 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : str = 'swinv2'
A : Union[str, Any] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , SCREAMING_SNAKE_CASE__=224 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=96 , SCREAMING_SNAKE_CASE__=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE__=[3, 6, 12, 24] , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=4.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-5 , SCREAMING_SNAKE_CASE__=32 , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : int = image_size
lowercase : Union[str, Any] = patch_size
lowercase : int = num_channels
lowercase : List[Any] = embed_dim
lowercase : List[Any] = depths
lowercase : Dict = len(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = num_heads
lowercase : int = window_size
lowercase : str = mlp_ratio
lowercase : int = qkv_bias
lowercase : Union[str, Any] = hidden_dropout_prob
lowercase : Tuple = attention_probs_dropout_prob
lowercase : Tuple = drop_path_rate
lowercase : List[Any] = hidden_act
lowercase : Any = use_absolute_embeddings
lowercase : Dict = layer_norm_eps
lowercase : Optional[int] = initializer_range
lowercase : Tuple = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase : str = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE__ ) - 1) )
lowercase : Dict = (0, 0, 0, 0)
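# Sketch, assuming the upstream class name Swinv2Config and attribute name
# hidden_size for the config above: with the defaults, the derived channel
# dimension is int(96 * 2 ** (4 - 1)) == 768.
#
#   config = Swinv2Config()
#   assert config.hidden_size == 768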
| 337 |
def bfs(graph, source, sink, parent) -> bool:
    """Breadth-first search over the residual graph; fills `parent` with the
    discovered augmenting path and reports whether the sink was reached."""
    visited = [False] * len(graph)
    queue = [source]
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink) -> int:
    """Edmonds-Karp flavour of Ford-Fulkerson: repeatedly augment along the
    shortest path found by BFS until no augmenting path remains."""
    # This array is filled by BFS to store the augmenting path.
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path.
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities along the path (forward and reverse edges).
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
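# The capacity matrix above is the classic CLRS flow-network example; the maximum
# flow from vertex 0 to vertex 5 is 23, so the script prints 23.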
| 337 | 1 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
__a = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
__a , __a = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
__a = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
__a = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
__a = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 337 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """Mark the decorated function with a single key code so `KeyHandler` can dispatch to it."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    """Mark the decorated function with several key codes so `KeyHandler` can dispatch to it."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one character and dispatch it to the handler registered for that key, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Recreate the class with the KeyHandler metaclass so marked methods become key handlers."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
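# Hypothetical usage sketch, kept as comments because handle_input blocks on a key
# press. The menu class is an assumption; the KEYMAP entry name is assumed to exist:
#
#   @register
#   class Menu:
#       @mark(KEYMAP["newline"])
#       def select(self):
#           return "selected"
#
#   Menu.handle_input()  # dispatches a pressed Enter to Menu.select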
| 337 | 1 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
| 337 |
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """A logger adapter which, by default, emits each record on the main process only."""

    @staticmethod
    def _should_log(main_process_only):
        """Check if the log should be performed on this process."""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    """Return a `MultiProcessAdapter`-wrapped logger, honouring ACCELERATE_LOG_LEVEL."""
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
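# Usage sketch, kept as comments since logging requires an initialized accelerate
# state. The keyword defaults shown are those popped in MultiProcessAdapter.log:
#
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed once, on the main process only")
#   logger.info("printed on every process, in rank order", main_process_only=False, in_order=True)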
| 337 | 1 |
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Solve a strictly diagonally dominant linear system iteratively via Jacobi's method."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for the given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Check that each diagonal entry strictly dominates the rest of its row; raise otherwise."""
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
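# Worked example as comments: the coefficient matrix below is strictly diagonally
# dominant (each diagonal entry exceeds the sum of the rest of its row), so the
# iteration converges:
#
#   coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
#   constant = np.array([[2.0], [-6.0], [-4.0]])
#   init_val = [0.5, -0.5, -0.5]
#   jacobi_iteration_method(coefficient, constant, init_val, iterations=3)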
| 337 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because Lightning requires it; the model is not trained here
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__a = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
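# Example command line. The script filename and paths are placeholders; the flags
# match the parser defined above:
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoint.ckpt \
#       --pytorch_dump_folder_path ./converted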
| 337 | 1 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search returning vertices in order of finishing time."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search over the reversed graph, collecting one strongly connected component."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: order vertices by finish time, then sweep the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
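# Minimal self-check added for illustration (uses only the names defined above):
# in test_graph_1 the cycle 0 -> 2 -> 1 -> 0 collapses to one component.
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]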
| 337 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Any = 'yolos'
def __init__( self , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=[512, 864] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=100 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = hidden_size
lowercase : int = num_hidden_layers
lowercase : str = num_attention_heads
lowercase : str = intermediate_size
lowercase : Dict = hidden_act
lowercase : int = hidden_dropout_prob
lowercase : Optional[Any] = attention_probs_dropout_prob
lowercase : List[Any] = initializer_range
lowercase : Optional[int] = layer_norm_eps
lowercase : str = image_size
lowercase : Dict = patch_size
lowercase : str = num_channels
lowercase : Optional[int] = qkv_bias
lowercase : List[str] = num_detection_tokens
lowercase : List[str] = use_mid_position_embeddings
lowercase : Dict = auxiliary_loss
# Hungarian matcher
lowercase : Optional[Any] = class_cost
lowercase : Any = bbox_cost
lowercase : int = giou_cost
# Loss coefficients
lowercase : Dict = bbox_loss_coefficient
lowercase : Optional[Any] = giou_loss_coefficient
lowercase : Tuple = eos_coefficient
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = version.parse('1.11' )
@property
def __lowerCamelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCamelCase ( self ):
return 1E-4
@property
def __lowerCamelCase ( self ):
return 12
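# Sketch: the ONNX export config above declares a single NCHW input, so a dummy
# export tensor would have shape (batch, num_channels, height, width), e.g.
# (1, 3, 512, 864) given the default image_size above, validated at atol 1e-4
# with opset 12.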
| 337 | 1 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
__a = '''3'''
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 337 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__a = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) -> None:
    """Compare the installed version against the wanted version using the operator `op`."""
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Perform a runtime check of a dependency version, e.g. require_version("numpy>=1.17")."""
    hint = f"\n{hint}" if hint is not None else ""
    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")
    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )
    # check that the right version is installed if a version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which appends a core-specific pip hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
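# Usage sketch as comments (per the definitions above; executing these requires
# the named packages to be installed):
#
#   require_version("numpy")                                # presence-only check
#   require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")    # multi-clause range check
#   require_version_core("protobuf")                        # same check, with the pip hint appended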
| 337 | 1 |
def generate_large_matrix() -> list[list[int]]:
    """Build a 1000x1000 grid whose rows and columns are sorted in decreasing order."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Binary-search the index of the first negative number in a decreasing row."""
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1, which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives by shrinking the search bound row by row."""
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by flattening the whole grid; O(m * n)."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives row by row, breaking at the first negative in each row."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting strategies against the large grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 337 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__a = logging.get_logger(__name__)
__a = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = 'deta'
A : Dict = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=900 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=1024 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="relu" , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__="sine" , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=300 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.25 , **SCREAMING_SNAKE_CASE__ , ):
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowercase : Tuple = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
else:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = backbone_config.pop('''model_type''' )
lowercase : Any = CONFIG_MAPPING[backbone_model_type]
lowercase : List[Any] = config_class.from_dict(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = backbone_config
lowercase : Union[str, Any] = num_queries
lowercase : Any = max_position_embeddings
lowercase : int = d_model
lowercase : Any = encoder_ffn_dim
lowercase : Optional[int] = encoder_layers
lowercase : Tuple = encoder_attention_heads
lowercase : Optional[Any] = decoder_ffn_dim
lowercase : Optional[int] = decoder_layers
lowercase : int = decoder_attention_heads
lowercase : Any = dropout
lowercase : int = attention_dropout
lowercase : Dict = activation_dropout
lowercase : int = activation_function
lowercase : Dict = init_std
lowercase : List[str] = init_xavier_std
lowercase : Optional[Any] = encoder_layerdrop
lowercase : Tuple = auxiliary_loss
lowercase : Tuple = position_embedding_type
# deformable attributes
lowercase : List[str] = num_feature_levels
lowercase : Tuple = encoder_n_points
lowercase : Optional[int] = decoder_n_points
lowercase : Tuple = two_stage
lowercase : Optional[Any] = two_stage_num_proposals
lowercase : Union[str, Any] = with_box_refine
lowercase : Any = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
lowercase : Optional[Any] = class_cost
lowercase : str = bbox_cost
lowercase : List[Any] = giou_cost
# Loss coefficients
lowercase : Tuple = mask_loss_coefficient
lowercase : Any = dice_loss_coefficient
lowercase : Dict = bbox_loss_coefficient
lowercase : Tuple = giou_loss_coefficient
lowercase : Union[str, Any] = eos_coefficient
lowercase : Tuple = focal_alpha
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def __lowerCamelCase ( self ):
return self.encoder_attention_heads
@property
def __lowerCamelCase ( self ):
return self.d_model
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase : Any = self.backbone_config.to_dict()
lowercase : List[str] = self.__class__.model_type
return output
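# Sketch, assuming the upstream class name DetaConfig for the config above: with
# no backbone_config supplied, the constructor falls back to a ResNet backbone.
#
#   config = DetaConfig()
#   assert config.backbone_config.model_type == "resnet"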
| 337 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
__a = logging.get_logger(__name__)
__a = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__a = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
__a = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
__a = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
__a = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 5_12,
'''facebook/dpr-ctx_encoder-multiset-base''': 5_12,
}
__a = {
'''facebook/dpr-question_encoder-single-nq-base''': 5_12,
'''facebook/dpr-question_encoder-multiset-base''': 5_12,
}
__a = {
'''facebook/dpr-reader-single-nq-base''': 5_12,
'''facebook/dpr-reader-multiset-base''': 5_12,
}
__a = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
__a = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
__a = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = VOCAB_FILES_NAMES
A : str = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
A : int = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Tuple = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
A : Union[str, Any] = DPRContextEncoderTokenizer
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = VOCAB_FILES_NAMES
A : Tuple = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
A : int = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
A : Optional[int] = DPRQuestionEncoderTokenizer
__a = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
__a = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
__a = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(A__ )
class __SCREAMING_SNAKE_CASE :
def __call__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
if titles is None and texts is None:
return super().__call__(
SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
elif titles is None or texts is None:
lowercase : Tuple = titles if texts is None else texts
return super().__call__(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowercase : Optional[int] = titles if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else [titles]
lowercase : Optional[int] = texts if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else [texts]
lowercase : Dict = len(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = questions if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else [questions] * n_passages
assert len(SCREAMING_SNAKE_CASE__ ) == len(
        SCREAMING_SNAKE_CASE__ ), f"""There should be as many titles as texts but got {len(SCREAMING_SNAKE_CASE__ )} titles and {len(SCREAMING_SNAKE_CASE__ )} texts."""
lowercase : List[Any] = super().__call__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ )['''input_ids''']
lowercase : str = super().__call__(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ )['''input_ids''']
lowercase : List[str] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
]
}
if return_attention_mask is not False:
lowercase : Union[str, Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowercase : str = attention_mask
return self.pad(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 16 , SCREAMING_SNAKE_CASE__ = 64 , SCREAMING_SNAKE_CASE__ = 4 , ):
lowercase : str = reader_input['''input_ids''']
lowercase , lowercase , lowercase : Dict = reader_output[:3]
lowercase : Dict = len(SCREAMING_SNAKE_CASE__ )
lowercase : int = sorted(range(SCREAMING_SNAKE_CASE__ ) , reverse=SCREAMING_SNAKE_CASE__ , key=relevance_logits.__getitem__ )
lowercase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
lowercase : Dict = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowercase : str = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowercase : List[str] = sequence_ids.index(self.pad_token_id )
else:
lowercase : Optional[int] = len(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=SCREAMING_SNAKE_CASE__ , top_spans=SCREAMING_SNAKE_CASE__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=SCREAMING_SNAKE_CASE__ , start_index=SCREAMING_SNAKE_CASE__ , end_index=SCREAMING_SNAKE_CASE__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(SCREAMING_SNAKE_CASE__ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ):
lowercase : int = []
for start_index, start_score in enumerate(SCREAMING_SNAKE_CASE__ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowercase : List[Any] = sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : x[1] , reverse=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]"""
lowercase : List[str] = end_index - start_index + 1
assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(SCREAMING_SNAKE_CASE__ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(A__ )
class __SCREAMING_SNAKE_CASE ( A__ , A__ ):
A : Any = VOCAB_FILES_NAMES
A : Union[str, Any] = READER_PRETRAINED_VOCAB_FILES_MAP
A : Tuple = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Tuple = READER_PRETRAINED_INIT_CONFIGURATION
A : Any = ['input_ids', 'attention_mask']
A : str = DPRReaderTokenizer
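# Usage sketch as comments. The reader class name is assumed to be
# DPRReaderTokenizerFast, per the READER_* maps above; the model id comes from
# those maps:
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encodings = tokenizer(
#       questions="What is love?",
#       titles="Haddaway",
#       texts="'What Is Love' is a song recorded by Haddaway",
#   )
#   # each row of encodings["input_ids"] is laid out as
#   # [CLS] question [SEP] title [SEP] text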
| 337 |
def solution() -> str:
    """Return the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
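# An equivalent constant-memory variant keeps only the last ten digits while
# summing, via three-argument pow; for this problem the result has ten digits,
# so it matches the string slice above:
#
#   str(sum(pow(i, i, 10**10) for i in range(1, 1001)) % 10**10)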
| 337 | 1 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
__a = logging.get_logger(__name__)
def __lowercase ( _UpperCamelCase ) ->List[int]:
"""simple docstring"""
if isinstance(_UpperCamelCase, np.ndarray ):
return list(tensor.shape )
lowercase : Optional[Any] = tf.shape(_UpperCamelCase )
if tensor.shape == tf.TensorShape(_UpperCamelCase ):
return dynamic
lowercase : Tuple = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(_UpperCamelCase )]
def __lowercase ( _UpperCamelCase, _UpperCamelCase = None, _UpperCamelCase = None ) ->tf.Tensor:
"""simple docstring"""
return tf.nn.softmax(logits=logits + 1e-9, axis=_UpperCamelCase, name=_UpperCamelCase )
def functional_layernorm(inputs , weight , bias , epsilon=1e-5 , axis=-1 ):
    """Apply layer normalization along `axis` using 1D weight and bias tensors."""
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis , int ):
        raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
    # Get mean and variance on the axis to be normalized
    mean , variance = tf.nn.moments(inputs , axes=[axis] , keepdims=True )
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs )[axis]
        weight = tf.reshape(weight , shape )
        bias = tf.reshape(bias , shape )
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs , mean , variance , offset=bias , scale=weight , variance_epsilon=epsilon , )
    return outputs
def flatten(input , start_dim=0 , end_dim=-1 ):
    """Replicate torch.flatten in TF: collapse dims start_dim..end_dim into one."""
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input )
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0 )
    return tf.reshape(input , out_shape )
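# Example: the semantics match torch.flatten, e.g.
# flatten(tf.zeros([2, 3, 4]), start_dim=1) has shape [2, 12].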
def invert_attention_mask(encoder_attention_mask ) ->tf.Tensor:
    """Turn a 0/1 attention mask into an additive mask of large negative values."""
    if not isinstance(encoder_attention_mask , tf.Tensor ):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask )  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor , embed_dim , tensor_name = "input_ids" ) ->None:
    """Assert that all token ids in `tensor` are below the embedding matrix size."""
    tf.debugging.assert_less(
        tensor , tf.cast(embed_dim , dtype=tensor.dtype ), message=(
            f"""The maximum value of {tensor_name} ({tf.math.reduce_max(tensor )}) must be smaller than the embedding """
            f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
        ), )
def save_attributes_to_hdf5_group(group , name , data ):
    """Save attributes to an HDF5 group, chunking them if they exceed the header limit."""
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x ) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            '''The following attributes cannot be saved to HDF5 file because '''
            f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
            f"""bytes: {bad_attributes}""" )
    data_npy = np.asarray(data )
    num_chunks = 1
    chunked_data = np.array_split(data_npy , num_chunks )
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
        num_chunks += 1
        chunked_data = np.array_split(data_npy , num_chunks )
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data ):
            group.attrs['''%s%d''' % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group , name ) ->List[str]:
    """Load attributes from an HDF5 group, reassembling chunked attributes if needed."""
    if name in group.attrs:
        data = [n.decode('''utf8''' ) if hasattr(n , '''decode''' ) else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode('''utf8''' ) if hasattr(n , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
            chunk_id += 1
    return data
def expand_1d(data ):
    """Expand any rank-1 tf.Tensor inside a nested structure to rank 2."""
    def _expand_single_1d_tensor(t ):
        if isinstance(t , tf.Tensor ) and t.shape.rank == 1:
            return tf.expand_dims(t , axis=-1 )
        return t
    return tf.nest.map_structure(_expand_single_1d_tensor , data )
| 337 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__a = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self ):
lowercase : str = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
lowercase : Any = self.diffusers_dir
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE__ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def __lowerCamelCase ( self ):
lowercase : List[Any] = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
lowercase : Tuple = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowercase : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
lowercase : Any = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
lowercase : List[Any] = black.format_str(SCREAMING_SNAKE_CASE__ , mode=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' , newline='''\n''' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(SCREAMING_SNAKE_CASE__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f:
self.assertTrue(f.read() , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Tuple = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , )
# Copy consistency with a really long name
lowercase : List[Any] = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , overwrite_result=re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , )
| 337 | 1 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
__a = datasets.logging.get_logger(__name__)
__a = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
__a = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''
__a = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
__a = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
    def _download_and_prepare( self , dl_manager ):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                '''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
                '''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
            checkpoint_name = '''bleurt-base-128'''
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" )
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        self.scorer = score.BleurtScorer(os.path.join(model_path , checkpoint_name ) )
    def _compute( self , predictions , references ):
        scores = self.scorer.score(references=references , candidates=predictions )
        return {"scores": scores}
| 337 |
import math
class Graph:
    def __init__( self , n=0 ):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # dp[i][j] stores minimum distance from i to j
    def add_edge( self , u , v , w ):
        self.dp[u][v] = w
    def floyd_warshall( self ):
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
    def show_min( self , u , v ):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
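    # Sanity check of the example graph above (expected values derived by hand):
    print(graph.show_min(1, 4))  # 11: 1 -> 3 -> 4 (5 + 6)
    print(graph.show_min(0, 3))  # 16: 0 -> 2 -> 3 (9 + 7)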
| 337 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all BART models at https://huggingface.co/models?filter=bart
__a = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
__a = {
'''facebook/bart-base''': 10_24,
'''facebook/bart-large''': 10_24,
'''facebook/bart-large-mnli''': 10_24,
'''facebook/bart-large-cnn''': 10_24,
'''facebook/bart-large-xsum''': 10_24,
'''yjernite/bart_eli5''': 10_24,
}
@lru_cache()
def bytes_to_unicode():
    """Return the GPT-2 style mapping from utf-8 bytes to printable unicode strings."""
    bs = (
        list(range(ord('''!''' ), ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ), ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ), ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word ):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
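# A quick illustration of the two helpers above:
#   bytes_to_unicode()[32] == 'Ġ'          # the space byte maps to a printable stand-in
#   get_pairs(("h", "e", "l", "l", "o"))   # {('h','e'), ('e','l'), ('l','l'), ('l','o')}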
class __SCREAMING_SNAKE_CASE ( A__ ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="replace" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ , ):
lowercase : Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else bos_token
lowercase : Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else eos_token
lowercase : Any = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else sep_token
lowercase : str = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cls_token
lowercase : List[str] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else unk_token
lowercase : List[Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase : int = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as vocab_handle:
lowercase : str = json.load(SCREAMING_SNAKE_CASE__ )
lowercase : Dict = {v: k for k, v in self.encoder.items()}
lowercase : Any = errors # how to handle errors in decoding
lowercase : Union[str, Any] = bytes_to_unicode()
lowercase : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as merges_handle:
lowercase : str = merges_handle.read().split('''\n''' )[1:-1]
lowercase : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
lowercase : int = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
lowercase : Optional[Any] = {}
lowercase : Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase : Tuple = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def __lowerCamelCase ( self ):
return len(self.encoder )
def __lowerCamelCase ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ''' '''.join(word )
        self.cache[token] = word
        return word
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = []
for token in re.findall(self.pat , SCREAMING_SNAKE_CASE__ ):
lowercase : Union[str, Any] = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE__ ).split(''' ''' ) )
return bpe_tokens
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self.decoder.get(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = ''''''.join(SCREAMING_SNAKE_CASE__ )
lowercase : Any = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase : Dict = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : Optional[int] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ ) + '''\n''' )
lowercase : Tuple = 0
with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE__ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
lowercase : List[str] = token_index
writer.write(''' '''.join(SCREAMING_SNAKE_CASE__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase : str = [self.cls_token_id]
lowercase : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : int = [self.sep_token_id]
lowercase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE__ ) > 0 and not text[0].isspace()):
lowercase : Union[str, Any] = ''' ''' + text
return (text, kwargs)
| 337 |
from __future__ import annotations
def average(nums ) ->float:
    """Return the arithmetic mean of nums; raise ValueError on an empty list."""
    if not nums:
        raise ValueError('''List is empty''' )
    return sum(nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 1 |
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers listed in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__ ), '''num.txt''' )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 337 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__a = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
        warnings.warn(
            '''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DeiTImageProcessor instead.''' , FutureWarning , )
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 337 | 1 |
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    def __init__( self , id_ ):
        self.id = str(id_ )
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}
    def __lt__( self , other ):
        return self.key < other.key
    def __repr__( self ):
        return self.id
    def add_neighbor( self , vertex ):
        self.neighbors.append(vertex )
    def add_edge( self , vertex , weight ):
        self.edges[vertex.id] = weight
def connect(graph , a , b , edge ):
    """Add an undirected weighted edge between the 1-indexed nodes a and b."""
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge )
    graph[b - 1].add_edge(graph[a - 1], edge )
def prim(graph , root ) ->list:
    """Prim's algorithm: return the MST edges as 1-indexed (node, parent) pairs."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q )
        q.remove(u )
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1 , len(graph ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a
def prim_heap(graph , root ) ->Iterator[tuple]:
    """Heap-based Prim's algorithm; yields MST edges as 1-indexed (node, parent) pairs."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph )
    hq.heapify(h )
    while h:
        u = hq.heappop(h )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h )
    for i in range(1 , len(graph ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
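# A minimal usage sketch (not part of the original module): build a weighted
# triangle on vertices 0..2 (connect() takes 1-based endpoints) and take the
# MST from vertex 0 with both variants.
def _demo_prim() -> None:
    g = [Vertex(i ) for i in range(3 )]
    connect(g, 1, 2, 1 )  # edge 0-1, weight 1
    connect(g, 2, 3, 2 )  # edge 1-2, weight 2
    connect(g, 1, 3, 4 )  # edge 0-2, weight 4
    print(prim(g, g[0] ) )             # [(2, 1), (3, 2)]
    print(list(prim_heap(g, g[0] ) ) )  # same MST via the heap variant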
def __lowercase ( ) ->None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
__a = logging.get_logger(__name__)
def shape_list(tensor ) ->List[int]:
    """Return the shape of `tensor`, preferring static dims and falling back to dynamic ones."""
    if isinstance(tensor , np.ndarray ):
        return list(tensor.shape )
    dynamic = tf.shape(tensor )
    if tensor.shape == tf.TensorShape(None ):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static )]
def __lowercase ( _UpperCamelCase, _UpperCamelCase = None, _UpperCamelCase = None ) ->tf.Tensor:
"""simple docstring"""
return tf.nn.softmax(logits=logits + 1e-9, axis=_UpperCamelCase, name=_UpperCamelCase )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=1e-5, _UpperCamelCase=-1 ) ->int:
"""simple docstring"""
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_UpperCamelCase, _UpperCamelCase ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
lowercase , lowercase : Union[str, Any] = tf.nn.moments(_UpperCamelCase, axes=[axis], keepdims=_UpperCamelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
lowercase : int = [1] * inputs.shape.rank
lowercase : Union[str, Any] = shape_list(_UpperCamelCase )[axis]
lowercase : List[str] = tf.reshape(_UpperCamelCase, _UpperCamelCase )
lowercase : Dict = tf.reshape(_UpperCamelCase, _UpperCamelCase )
# Compute layer normalization using the batch_normalization
# function.
lowercase : List[str] = tf.nn.batch_normalization(
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, offset=_UpperCamelCase, scale=_UpperCamelCase, variance_epsilon=_UpperCamelCase, )
return outputs
def __lowercase ( _UpperCamelCase, _UpperCamelCase=0, _UpperCamelCase=-1 ) ->List[Any]:
"""simple docstring"""
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
lowercase : Dict = tf.shape(_UpperCamelCase )
lowercase : Optional[Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
lowercase : List[str] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0 )
return tf.reshape(_UpperCamelCase, _UpperCamelCase )
def __lowercase ( _UpperCamelCase ) ->tf.Tensor:
"""simple docstring"""
if not isinstance(_UpperCamelCase, tf.Tensor ):
lowercase : Optional[Any] = tf.convert_to_tensor(_UpperCamelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
lowercase : Tuple = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
lowercase : List[Any] = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
lowercase : str = (
tf.cast(1, encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = "input_ids" ) ->None:
"""simple docstring"""
tf.debugging.assert_less(
_UpperCamelCase, tf.cast(_UpperCamelCase, dtype=tensor.dtype ), message=(
f"""The maximum value of {tensor_name} ({tf.math.reduce_max(_UpperCamelCase )}) must be smaller than the embedding """
f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
), )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
lowercase : List[Any] = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
lowercase : Optional[int] = [x for x in data if len(_UpperCamelCase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
f"""bytes: {bad_attributes}""" )
lowercase : Any = np.asarray(_UpperCamelCase )
lowercase : List[Any] = 1
lowercase : Tuple = np.array_split(_UpperCamelCase, _UpperCamelCase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
lowercase : Dict = np.array_split(_UpperCamelCase, _UpperCamelCase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(_UpperCamelCase ):
lowercase : Optional[int] = chunk_data
else:
lowercase : int = data
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->List[str]:
"""simple docstring"""
if name in group.attrs:
lowercase : str = [n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs[name]]
else:
lowercase : Optional[Any] = []
lowercase : List[str] = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def expand_1d(data ):
    """Expand any rank-1 tf.Tensor inside a nested structure to rank 2."""
    def _expand_single_1d_tensor(t ):
        if isinstance(t , tf.Tensor ) and t.shape.rank == 1:
            return tf.expand_dims(t , axis=-1 )
        return t
    return tf.nest.map_structure(_expand_single_1d_tensor , data )
| 337 | 1 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = '''__DUMMY_TRANSFORMERS_USER__'''
CI_HUB_USER_FULL_NAME = '''Dummy User'''
CI_HUB_USER_TOKEN = '''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'''
CI_HUB_ENDPOINT = '''https://hub-ci.huggingface.co'''
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''
CI_HUB_TOKEN_PATH = Path('''~/.huggingface/hub_ci_token''').expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch ):
    monkeypatch.setattr(
        '''huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE''', CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE )
@pytest.fixture
def ci_hub_config(monkeypatch ):
    monkeypatch.setattr('''datasets.config.HF_ENDPOINT''', CI_HUB_ENDPOINT )
    monkeypatch.setattr('''datasets.config.HUB_DATASETS_URL''', CI_HUB_DATASETS_URL )
@pytest.fixture
def ci_hub_token_path(monkeypatch ):
    monkeypatch.setattr('''huggingface_hub.hf_api.HfFolder.path_token''', CI_HUB_TOKEN_PATH )
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config , ci_hub_token_path ):
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield
    HfFolder.delete_token()
@pytest.fixture(scope='''session''' )
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT )
@pytest.fixture(scope='''session''' )
def hf_token(hf_api ):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token )
@pytest.fixture
def cleanup_repo(hf_api ):
    def _cleanup_repo(repo_id ):
        hf_api.delete_repo(repo_id , token=CI_HUB_USER_TOKEN , repo_type='''dataset''' )
    return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo ):
    @contextmanager
    def _temporary_repo(repo_id ):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id )
    return _temporary_repo
@pytest.fixture(scope='''session''' )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->int:
"""simple docstring"""
lowercase : Any = f"""repo_txt_data-{int(time.time() * 10e3 )}"""
lowercase : Dict = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_UpperCamelCase, token=_UpperCamelCase, repo_type='''dataset''', private=_UpperCamelCase )
hf_api.upload_file(
token=_UpperCamelCase, path_or_fileobj=str(_UpperCamelCase ), path_in_repo='''data/text_data.txt''', repo_id=_UpperCamelCase, repo_type='''dataset''', )
yield repo_id
try:
hf_api.delete_repo(_UpperCamelCase, token=_UpperCamelCase, repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->int:
"""simple docstring"""
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='''session''' )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->List[Any]:
"""simple docstring"""
lowercase : Any = f"""repo_zipped_txt_data-{int(time.time() * 10e3 )}"""
lowercase : Any = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_UpperCamelCase, token=_UpperCamelCase, repo_type='''dataset''', private=_UpperCamelCase )
hf_api.upload_file(
token=_UpperCamelCase, path_or_fileobj=str(_UpperCamelCase ), path_in_repo='''data.zip''', repo_id=_UpperCamelCase, repo_type='''dataset''', )
yield repo_id
try:
hf_api.delete_repo(_UpperCamelCase, token=_UpperCamelCase, repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->int:
"""simple docstring"""
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='''session''' )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Optional[int]:
"""simple docstring"""
lowercase : int = f"""repo_zipped_img_data-{int(time.time() * 10e3 )}"""
lowercase : Any = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_UpperCamelCase, token=_UpperCamelCase, repo_type='''dataset''', private=_UpperCamelCase )
hf_api.upload_file(
token=_UpperCamelCase, path_or_fileobj=str(_UpperCamelCase ), path_in_repo='''data.zip''', repo_id=_UpperCamelCase, repo_type='''dataset''', )
yield repo_id
try:
hf_api.delete_repo(_UpperCamelCase, token=_UpperCamelCase, repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->int:
"""simple docstring"""
return hf_private_dataset_repo_zipped_img_data_
| 337 |
def solution(n = 4000000 ) ->int:
    """Return the sum of all even Fibonacci numbers that do not exceed n."""
    even_fibs = []
    a , b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a , b = b, a + b
    return sum(even_fibs )
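# An equivalent sketch that touches only the even terms: every third Fibonacci
# number is even, and the even ones satisfy E(k) = 4*E(k-1) + E(k-2) (2, 8, 34, ...).
def solution_even_only(n: int = 4000000 ) -> int:
    total , a , b = 0, 2, 8
    while a <= n:
        total += a
        a , b = b, 4 * b + a
    return total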
if __name__ == "__main__":
print(F'''{solution() = }''')
| 337 | 1 |
def set_bit(number , position ) ->int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)
def clear_bit(number , position ) ->int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)
def flip_bit(number , position ) ->int:
    """Flip the bit at `position` of `number`."""
    return number ^ (1 << position)
def is_bit_set(number , position ) ->bool:
    """Return True if the bit at `position` is 1."""
    return ((number >> position) & 1) == 1
def get_bit(number , position ) ->int:
    """Return 1 if the bit at `position` is set, else 0."""
    return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__a = logging.get_logger(__name__)
__a = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = 'perceiver'
    def __init__( self , num_latents=256 , d_latents=1280 , d_model=768 , num_blocks=1 , num_self_attends_per_block=26 , num_self_attention_heads=8 , num_cross_attention_heads=8 , qk_channels=None , v_channels=None , cross_attention_shape_for_attention="kv" , self_attention_widening_factor=1 , cross_attention_widening_factor=1 , hidden_act="gelu" , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_query_residual=True , vocab_size=262 , max_position_embeddings=2048 , image_size=56 , train_size=[368, 496] , num_frames=16 , audio_samples_per_frame=1920 , samples_per_patch=16 , output_shape=[1, 16, 224, 224] , **kwargs , ):
        super().__init__(**kwargs )
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class __SCREAMING_SNAKE_CASE ( A__ ):
@property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''inputs''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
    @property
    def atol_for_validation( self ):
        return 1E-4
    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , num_choices = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 40 , image_height = 40 , ):
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor , PreTrainedTokenizerBase ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair )
            seq_length = compute_effective_axis_dimension(
                seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [''' '''.join(['''a'''] ) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input , return_tensors=framework ) )
            inputs['''inputs'''] = inputs.pop('''input_ids''' )
            return inputs
        elif isinstance(preprocessor , FeatureExtractionMixin ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size , fixed_dimension=OnnxConfig.default_fixed_batch )
            dummy_input = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
            inputs = dict(preprocessor(images=dummy_input , return_tensors=framework ) )
            inputs['''inputs'''] = inputs.pop('''pixel_values''' )
            return inputs
        else:
            raise ValueError(
                '''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
| 337 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __SCREAMING_SNAKE_CASE ( TensorFormatter[Mapping, 'torch.Tensor', Mapping] ):
    def __init__( self , features=None , **torch_tensor_kwargs ):
        super().__init__(features=features )
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization
    def _consolidate( self , column ):
        import torch
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column ):
                return torch.stack(column )
        return column
    def _tensorize( self , value ):
        import torch
        if isinstance(value , (str, bytes, type(None )) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            default_dtype = {'''dtype''': torch.int64}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {'''dtype''': torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        return torch.tensor(value , **{**default_dtype, **self.torch_tensor_kwargs} )
    def _recursive_tensorize( self , data_struct ):
        import torch
        # support for torch, tf, jax etc.
        if hasattr(data_struct , '''__array__''' ) and not isinstance(data_struct , torch.Tensor ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize( self , data_struct ):
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )
    def format_row( self , pa_table ):
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column( self , pa_table ):
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch( self , pa_table ):
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
| 337 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length = 8 ) ->str:
    """Generate a random password of the given length from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars ) for _ in range(length ) )
def alternative_password_generator(chars_incl , i ) ->str:
    """Generate a password of length i that is guaranteed to contain chars_incl."""
    i -= len(chars_incl )
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters , quotient + remainder )
        + random(digits , quotient )
        + random(punctuation , quotient )
    )
    list_of_chars = list(chars )
    shuffle(list_of_chars )
    return "".join(list_of_chars )
# random is a generalised function for letters, characters and numbers
def random(chars_incl , i ) ->str:
    return "".join(secrets.choice(chars_incl ) for _ in range(i ) )
def random_number(chars_incl , i ):
    pass  # Put your code here...
def random_letters(chars_incl , i ):
    pass  # Put your code here...
def random_characters(chars_incl , i ):
    pass  # Put your code here...
def is_strong_password(password , min_length = 8 ) ->bool:
    """Check that a password is long enough and mixes all four character classes."""
    if len(password ) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password )
    lower = any(char in ascii_lowercase for char in password )
    num = any(char in digits for char in password )
    spec_char = any(char in punctuation for char in password )
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
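# Example: is_strong_password('Hwea7$2!') -> True (mixes cases, digits and
# punctuation); is_strong_password('hello') -> False (shorter than 8).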
def main() ->None:
    length = int(input('''Please indicate the max length of your password: ''' ).strip() )
    chars_incl = input(
        '''Please indicate the characters that must be in your password: ''' ).strip()
    print('''Password generated:''', password_generator(length ) )
    print(
        '''Alternative Password generated:''', alternative_password_generator(chars_incl , length ), )
    print('''[If you are thinking of using this password, You better save it.]''' )
if __name__ == "__main__":
    main()
| 337 | 1 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class __SCREAMING_SNAKE_CASE ( ctypes.Structure ):
# _fields is a specific attr expected by ctypes
        _fields_ = [('size', ctypes.c_int), ('visible', ctypes.c_byte)]
def hide_cursor() ->None:
    """Hide the terminal cursor (Windows console API or ANSI escape code)."""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('''\033[?25l''' )
        sys.stdout.flush()
def show_cursor() ->None:
    """Show the terminal cursor again."""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('''\033[?25h''' )
        sys.stdout.flush()
@contextmanager
def __lowercase ( ) ->Union[str, Any]:
"""simple docstring"""
try:
hide_cursor()
yield
finally:
show_cursor()
| 337 |
from __future__ import annotations
solution = []
def is_safe(board , row , column ) ->bool:
    """Return True if no already-placed queen attacks the square (row, column)."""
    for i in range(len(board ) ):
        if board[row][i] == 1:
            return False
    for i in range(len(board ) ):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ), range(column , -1 , -1 ) ):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ), range(column , len(board ) ) ):
        if board[i][j] == 1:
            return False
    return True
def solve(board , row ) ->bool:
    """Place queens row by row, recording and printing each complete solution."""
    if row >= len(board ):
        solution.append(board )
        printboard(board )
        print()
        return True
    for i in range(len(board ) ):
        if is_safe(board , row , i ):
            board[row][i] = 1
            solve(board , row + 1 )
            board[row][i] = 0
    return False
def printboard(board ) ->None:
    """Print the board with Q for queens and . for empty squares."""
    for i in range(len(board ) ):
        for j in range(len(board ) ):
            if board[i][j] == 1:
                print('''Q''', end=''' ''' )
            else:
                print('''.''', end=''' ''' )
        print()
# n=int(input("The no. of queens"))
__a = 8
__a = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
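# A compact cross-check (a sketch, independent of the board-printing search
# above): count the solutions with bitmasks for columns and both diagonals.
# The classic result for n = 8 is 92 solutions.
def count_solutions(n: int , row: int = 0 , cols: int = 0 , diag1: int = 0 , diag2: int = 0 ) -> int:
    if row == n:
        return 1
    total = 0
    for col in range(n ):
        if not ((cols >> col) & 1 or (diag1 >> (row + col)) & 1 or (diag2 >> (row - col + n)) & 1):
            total += count_solutions(
                n , row + 1 , cols | 1 << col , diag1 | 1 << (row + col) , diag2 | 1 << (row - col + n) )
    return total
# count_solutions(8) == 92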
| 337 | 1 |
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup(params , i , prefix , layer_name="attention" ):
    """Return the (k, o, q, v) attention kernels for layer i."""
    k = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""]
    o = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""]
    q = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""]
    v = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""]
    return k, o, q, v
def tax_mlp_lookup(params , i , prefix , split_mlp_wi=False ):
    """Return the (wi, wo) MLP kernels for layer i."""
    if split_mlp_wi:
        wi_0 = params[f"""{prefix}/layers_{i}/mlp/wi_0/kernel"""]
        wi_1 = params[f"""{prefix}/layers_{i}/mlp/wi_1/kernel"""]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"""{prefix}/layers_{i}/mlp/wi/kernel"""]
    wo = params[f"""{prefix}/layers_{i}/mlp/wo/kernel"""]
    return wi, wo
def tax_layer_norm_lookup(params , i , prefix , layer_name ):
    """Return the layer-norm scale for layer i."""
    return params[f"""{prefix}/layers_{i}/{layer_name}/scale"""]
def __lowercase ( _UpperCamelCase, *, _UpperCamelCase, _UpperCamelCase ) ->List[str]:
"""simple docstring"""
lowercase : Optional[Any] = traverse_util.flatten_dict(variables['''target'''] )
lowercase : Dict = {'''/'''.join(_UpperCamelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowercase : Optional[Any] = '''encoder/layers_0/mlp/wi_0/kernel''' in old
print('''Split MLP:''', _UpperCamelCase )
lowercase : Optional[Any] = collections.OrderedDict()
# Shared embeddings.
lowercase : Any = old['''token_embedder/embedding''']
# Encoder.
for i in range(_UpperCamelCase ):
# Block i, layer 0 (Self Attention).
lowercase : Dict = tax_layer_norm_lookup(_UpperCamelCase, _UpperCamelCase, '''encoder''', '''pre_attention_layer_norm''' )
lowercase , lowercase , lowercase , lowercase : str = tax_attention_lookup(_UpperCamelCase, _UpperCamelCase, '''encoder''', '''attention''' )
lowercase : Union[str, Any] = layer_norm
lowercase : Dict = k.T
lowercase : Optional[int] = o.T
lowercase : Optional[Any] = q.T
lowercase : List[str] = v.T
# Block i, layer 1 (MLP).
lowercase : int = tax_layer_norm_lookup(_UpperCamelCase, _UpperCamelCase, '''encoder''', '''pre_mlp_layer_norm''' )
lowercase , lowercase : List[str] = tax_mlp_lookup(_UpperCamelCase, _UpperCamelCase, '''encoder''', _UpperCamelCase )
lowercase : Dict = layer_norm
if split_mlp_wi:
lowercase : Optional[Any] = wi[0].T
lowercase : int = wi[1].T
else:
lowercase : Optional[Any] = wi.T
lowercase : Dict = wo.T
lowercase : str = old[
'''encoder/relpos_bias/rel_embedding'''
].T
lowercase : Dict = old['''encoder/encoder_norm/scale''']
if not is_encoder_only:
# Decoder.
for i in range(_UpperCamelCase ):
# Block i, layer 0 (Self Attention).
lowercase : List[Any] = tax_layer_norm_lookup(_UpperCamelCase, _UpperCamelCase, '''decoder''', '''pre_self_attention_layer_norm''' )
lowercase , lowercase , lowercase , lowercase : Optional[int] = tax_attention_lookup(_UpperCamelCase, _UpperCamelCase, '''decoder''', '''self_attention''' )
lowercase : Union[str, Any] = layer_norm
lowercase : Any = k.T
lowercase : Tuple = o.T
lowercase : Optional[Any] = q.T
lowercase : Union[str, Any] = v.T
# Block i, layer 1 (Cross Attention).
lowercase : Tuple = tax_layer_norm_lookup(_UpperCamelCase, _UpperCamelCase, '''decoder''', '''pre_cross_attention_layer_norm''' )
lowercase , lowercase , lowercase , lowercase : Any = tax_attention_lookup(_UpperCamelCase, _UpperCamelCase, '''decoder''', '''encoder_decoder_attention''' )
lowercase : str = layer_norm
lowercase : List[str] = k.T
lowercase : str = o.T
lowercase : Optional[int] = q.T
lowercase : Dict = v.T
# Block i, layer 2 (MLP).
lowercase : Tuple = tax_layer_norm_lookup(_UpperCamelCase, _UpperCamelCase, '''decoder''', '''pre_mlp_layer_norm''' )
lowercase , lowercase : str = tax_mlp_lookup(_UpperCamelCase, _UpperCamelCase, '''decoder''', _UpperCamelCase )
lowercase : List[Any] = layer_norm
if split_mlp_wi:
lowercase : str = wi[0].T
lowercase : Any = wi[1].T
else:
lowercase : Dict = wi.T
lowercase : List[Any] = wo.T
lowercase : List[str] = old['''decoder/decoder_norm/scale''']
lowercase : Dict = old[
'''decoder/relpos_bias/rel_embedding'''
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowercase : Optional[int] = old['''decoder/logits_dense/kernel'''].T
return new
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
lowercase : Dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowercase : Dict = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowercase : List[Any] = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
lowercase : int = state_dict['''shared.weight''']
return state_dict
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
lowercase : Optional[int] = checkpoints.load_t5x_checkpoint(_UpperCamelCase )
lowercase : List[str] = convert_tax_to_pytorch(_UpperCamelCase, num_layers=config.num_layers, is_encoder_only=_UpperCamelCase )
lowercase : str = make_state_dict(_UpperCamelCase, _UpperCamelCase )
model.load_state_dict(_UpperCamelCase, strict=_UpperCamelCase )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = False ) ->Dict:
"""simple docstring"""
lowercase : Tuple = T5Config.from_json_file(_UpperCamelCase )
print(f"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowercase : Optional[int] = T5EncoderModel(_UpperCamelCase )
else:
lowercase : int = T5ForConditionalGeneration(_UpperCamelCase )
# Load weights from the T5X checkpoint
load_tax_weights_in_ta(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(_UpperCamelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(_UpperCamelCase )
print('''Done''' )
if __name__ == "__main__":
__a = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
__a = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
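# Example invocation (script name and paths are placeholders):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_dir \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_dump \
#       --is_encoder_only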
| 337 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__a = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
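# Note: with the _LazyModule pattern above, importing this package stays cheap;
# the torch- and TF-backed classes listed in _import_structure are only
# materialized when one of those attributes is first accessed.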
| 337 | 1 |
import math
class __SCREAMING_SNAKE_CASE :
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[int] = 0.0
lowercase : Any = 0.0
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
db += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > db else 1
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def __lowercase ( ) ->None:
"""simple docstring"""
lowercase : Tuple = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
lowercase : Optional[int] = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
lowercase : Dict = SelfOrganizingMap()
lowercase : Tuple = 3
lowercase : Optional[Any] = 0.5
for _ in range(_UpperCamelCase ):
for j in range(len(_UpperCamelCase ) ):
# training sample
lowercase : Optional[Any] = training_samples[j]
# Compute the winning vector
lowercase : Optional[Any] = self_organizing_map.get_winner(_UpperCamelCase, _UpperCamelCase )
# Update the winning vector
lowercase : int = self_organizing_map.update(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
# classify test sample
lowercase : Optional[int] = [0, 0, 0, 1]
lowercase : str = self_organizing_map.get_winner(_UpperCamelCase, _UpperCamelCase )
# results
print(f"""Clusters that the test sample belongs to : {winner}""" )
print(f"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
main()
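# Note: get_winner picks the unit whose weight vector has the smaller squared
# Euclidean distance to the sample, and update applies the standard SOM rule
# w_j <- w_j + alpha * (sample - w_j) to the winning unit.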
| 337 |
from collections.abc import Callable
class __SCREAMING_SNAKE_CASE :
def __init__( self , SCREAMING_SNAKE_CASE__ = None ):
# Stores actual heap items.
lowercase : list = []
# Stores indexes of each item for supporting updates and deletion.
lowercase : dict = {}
# Stores current size of heap.
lowercase : str = 0
# Stores the function used to evaluate the score of an item, on the basis of
# which the ordering is done.
lowercase : Tuple = key or (lambda x : x)
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return int((i - 1) / 2 ) if i > 0 else None
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase , lowercase : Dict = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
lowercase , lowercase : int = self.arr[j], self.arr[i]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return self.arr[i][1] < self.arr[j][1]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : int = self._left(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = self._right(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = i
if left is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = left
if right is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = right
return valid_parent
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[int] = self._parent(SCREAMING_SNAKE_CASE__ )
while parent is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self._swap(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase , lowercase : Optional[int] = parent, self._parent(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = self._get_valid_parent(SCREAMING_SNAKE_CASE__ )
while valid_parent != index:
self._swap(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase , lowercase : str = valid_parent, self._get_valid_parent(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if item not in self.pos_map:
return
lowercase : str = self.pos_map[item]
lowercase : Optional[int] = [item, self.key(SCREAMING_SNAKE_CASE__ )]
# Make sure the heap is correct in both the up and down directions.
# Ideally only one of them will make any change.
self._heapify_up(SCREAMING_SNAKE_CASE__ )
self._heapify_down(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
if item not in self.pos_map:
return
lowercase : List[str] = self.pos_map[item]
del self.pos_map[item]
lowercase : Optional[int] = self.arr[self.size - 1]
lowercase : int = index
self.size -= 1
# Make sure the heap is correct in both the up and down directions. Ideally only
# one of them will make any change, so there is no performance loss in calling both.
if self.size > index:
self._heapify_up(SCREAMING_SNAKE_CASE__ )
self._heapify_down(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : str = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(SCREAMING_SNAKE_CASE__ )] )
else:
lowercase : int = [item, self.key(SCREAMING_SNAKE_CASE__ )]
lowercase : str = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowerCamelCase ( self ):
return self.arr[0] if self.size else None
def __lowerCamelCase ( self ):
lowercase : str = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def __lowercase ( ) ->None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
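# Usage sketch (class and method names are hypothetical stand-ins for the
# definitions above):
#
#   heap = Heap(key=lambda x: -x)        # a negated key turns the min-heap into a max-heap
#   for v in (3, 7, 5):
#       heap.insert_item(v)
#   item, score = heap.extract_top()     # -> (7, -7)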
| 337 | 1 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@require_torch
def __lowerCamelCase ( self ):
lowercase : List[Any] = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' )
lowercase : Dict = load_dataset('''ashraq/esc50''' )
lowercase : Union[str, Any] = dataset['''train''']['''audio'''][-1]['''array''']
lowercase : int = audio_classifier(SCREAMING_SNAKE_CASE__ , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vacuum cleaner'''}] , )
@unittest.skip('''No models are available in TF''' )
def __lowerCamelCase ( self ):
pass
@slow
@require_torch
def __lowerCamelCase ( self ):
lowercase : List[Any] = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
# This is an audio of a dog
lowercase : Union[str, Any] = load_dataset('''ashraq/esc50''' )
lowercase : Union[str, Any] = dataset['''train''']['''audio'''][-1]['''array''']
lowercase : List[str] = audio_classifier(SCREAMING_SNAKE_CASE__ , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vacuum cleaner'''},
] , )
lowercase : int = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vacuum cleaner'''},
],
]
* 5 , )
lowercase : Optional[Any] = audio_classifier(
[audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] , batch_size=5 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vacuum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''' )
def __lowerCamelCase ( self ):
pass
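# Minimal usage sketch outside the test harness (model and labels as in the
# slow test above):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier(audio_array, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])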
| 337 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 337 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : str = 'sew'
def __init__( self , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-5 , SCREAMING_SNAKE_CASE__="group" , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , SCREAMING_SNAKE_CASE__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=128 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=0.05 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__="mean" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=2 , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**SCREAMING_SNAKE_CASE__ , pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = hidden_size
lowercase : Any = feat_extract_norm
lowercase : Optional[Any] = feat_extract_activation
lowercase : Tuple = list(SCREAMING_SNAKE_CASE__ )
lowercase : Any = list(SCREAMING_SNAKE_CASE__ )
lowercase : str = list(SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = conv_bias
lowercase : Tuple = num_conv_pos_embeddings
lowercase : Optional[Any] = num_conv_pos_embedding_groups
lowercase : List[str] = len(self.conv_dim )
lowercase : Optional[int] = num_hidden_layers
lowercase : Tuple = intermediate_size
lowercase : Optional[Any] = squeeze_factor
lowercase : Any = hidden_act
lowercase : Optional[int] = num_attention_heads
lowercase : Union[str, Any] = hidden_dropout
lowercase : Dict = attention_dropout
lowercase : Union[str, Any] = activation_dropout
lowercase : Dict = feat_proj_dropout
lowercase : Optional[int] = final_dropout
lowercase : Optional[Any] = layerdrop
lowercase : Union[str, Any] = layer_norm_eps
lowercase : Optional[int] = initializer_range
lowercase : Union[str, Any] = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. '''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '''
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f""" = {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase : int = apply_spec_augment
lowercase : Optional[Any] = mask_time_prob
lowercase : Union[str, Any] = mask_time_length
lowercase : List[str] = mask_time_min_masks
lowercase : str = mask_feature_prob
lowercase : Union[str, Any] = mask_feature_length
lowercase : Union[str, Any] = mask_feature_min_masks
# ctc loss
lowercase : List[str] = ctc_loss_reduction
lowercase : List[str] = ctc_zero_infinity
# sequence classification
lowercase : Any = use_weighted_layer_sum
lowercase : Dict = classifier_proj_size
@property
def __lowerCamelCase ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
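# Note: the property above multiplies the conv strides together; with the
# default conv_stride of (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) it evaluates
# to 5 * 2**6 = 320, i.e. one encoder frame per 320 input samples.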
| 337 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__a = logging.get_logger(__name__)
__a = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__a = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
__a = {'''facebook/blenderbot-3B''': 1_28}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Dict = VOCAB_FILES_NAMES
A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Optional[int] = ['input_ids', 'attention_mask']
A : str = BlenderbotTokenizer
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="replace" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowercase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
lowercase : List[Any] = getattr(SCREAMING_SNAKE_CASE__ , pre_tok_state.pop('''type''' ) )
lowercase : str = add_prefix_space
lowercase : List[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = add_prefix_space
lowercase : str = '''post_processor'''
lowercase : str = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if tokenizer_component_instance:
lowercase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
lowercase : Tuple = tuple(state['''sep'''] )
if "cls" in state:
lowercase : Union[str, Any] = tuple(state['''cls'''] )
lowercase : Optional[int] = False
if state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
lowercase : Any = add_prefix_space
lowercase : Tuple = True
if state.get('''trim_offsets''' , SCREAMING_SNAKE_CASE__ ) != trim_offsets:
lowercase : List[str] = trim_offsets
lowercase : Optional[int] = True
if changes_to_apply:
lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ , state.pop('''type''' ) )
lowercase : Union[str, Any] = component_class(**SCREAMING_SNAKE_CASE__ )
setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __lowerCamelCase ( self ):
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else value
lowercase : Any = value
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : Any = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : int = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : Tuple = [self.sep_token_id]
lowercase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
return token_ids_a + [self.eos_token_id]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to add a space prefix, as is done within Blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = ''' '''.join(SCREAMING_SNAKE_CASE__ )
lowercase : Any = self.encode(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > self.model_max_length:
lowercase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
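# Note: when building model inputs, Blenderbot appends only the EOS token
# (the `token_ids_a + [self.eos_token_id]` return above); a single sequence
# gets no BOS/CLS prefix.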
| 337 | 1 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->float:
"""simple docstring"""
return math.sqrt(sum(pow(a - b, 2 ) for a, b in zip(_UpperCamelCase, _UpperCamelCase ) ) )
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->list[list[list[float] | float]]:
"""simple docstring"""
if dataset.ndim != value_array.ndim:
lowercase : int = (
'''Wrong input data\'s dimensions... '''
f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(_UpperCamelCase )
try:
if dataset.shape[1] != value_array.shape[1]:
lowercase : List[Any] = (
'''Wrong input data\'s shape... '''
f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(_UpperCamelCase )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('''Wrong shape''' )
if dataset.dtype != value_array.dtype:
lowercase : Dict = (
'''Input data have different datatype... '''
f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(_UpperCamelCase )
lowercase : Tuple = []
for value in value_array:
lowercase : Dict = euclidean(_UpperCamelCase, dataset[0] )
lowercase : List[Any] = dataset[0].tolist()
for dataset_value in dataset[1:]:
lowercase : Union[str, Any] = euclidean(_UpperCamelCase, _UpperCamelCase )
if dist > temp_dist:
lowercase : Any = temp_dist
lowercase : Union[str, Any] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->float:
"""simple docstring"""
return np.dot(_UpperCamelCase, _UpperCamelCase ) / (norm(_UpperCamelCase ) * norm(_UpperCamelCase ))
if __name__ == "__main__":
import doctest
doctest.testmod()
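# Worked example for the nearest-neighbour search above (shapes must agree
# along axis 1):
#
#   dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
#   value_array = np.array([[0.9, 1.1]])
#   # the nearest neighbour of [0.9, 1.1] is [1.0, 1.0], at distance
#   # sqrt(0.02) ~= 0.1414, so the search returns [[[1.0, 1.0], 0.1414...]]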
| 337 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def __lowercase ( ) ->int:
"""simple docstring"""
lowercase : Tuple = HfArgumentParser(_UpperCamelCase )
lowercase : List[str] = parser.parse_args_into_dataclasses()[0]
lowercase : Optional[int] = TensorFlowBenchmark(args=_UpperCamelCase )
try:
lowercase : Any = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowercase : Optional[int] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
lowercase : Any = ''' '''.join(str(_UpperCamelCase ).split(''' ''' )[:-1] )
lowercase : Any = ''''''
lowercase : str = eval(str(_UpperCamelCase ).split(''' ''' )[-1] )
lowercase : List[str] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
lowercase : Union[str, Any] = full_error_msg + begin_error_msg + str(_UpperCamelCase )
raise ValueError(_UpperCamelCase )
benchmark.run()
if __name__ == "__main__":
main()
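# Typical invocation (script name is a placeholder; flag names mirror the
# benchmark-arguments dataclass and are illustrative):
#
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128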
| 337 | 1 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = torch.nn.Linear(10 , 10 )
lowercase : List[Any] = torch.optim.SGD(model.parameters() , 0.1 )
lowercase : Union[str, Any] = Accelerator()
lowercase : Optional[Any] = accelerator.prepare(SCREAMING_SNAKE_CASE__ )
try:
pickle.loads(pickle.dumps(SCREAMING_SNAKE_CASE__ ) )
except Exception as e:
self.fail(f"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
| 337 |
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = [False] * len(_UpperCamelCase )
lowercase : Optional[int] = []
queue.append(_UpperCamelCase )
lowercase : Union[str, Any] = True
while queue:
lowercase : List[str] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_UpperCamelCase )
lowercase : Tuple = True
lowercase : Optional[Any] = u
return visited[t]
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->List[str]:
"""simple docstring"""
lowercase : List[str] = [-1] * (len(_UpperCamelCase ))
lowercase : int = 0
while bfs(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ):
lowercase : List[str] = float('''Inf''' )
lowercase : int = sink
while s != source:
# Find the minimum value in select path
lowercase : List[Any] = min(_UpperCamelCase, graph[parent[s]][s] )
lowercase : Union[str, Any] = parent[s]
max_flow += path_flow
lowercase : Optional[int] = sink
while v != source:
lowercase : Any = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowercase : Union[str, Any] = parent[v]
return max_flow
__a = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
__a , __a = 0, 5
print(ford_fulkerson(graph, source, sink))
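# The capacity matrix above is the classic CLRS flow network from source 0 to
# sink 5; the program prints a maximum flow of 23.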
| 337 | 1 |
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
__a = logging.get_logger(__name__)
__a = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__a = {
'''vocab_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''',
},
'''merges_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''Salesforce/codegen-350M-mono''': (
'''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'''
),
},
}
__a = {
'''Salesforce/codegen-350M-mono''': 20_48,
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Any = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : List[Any] = ['input_ids', 'attention_mask']
A : List[str] = CodeGenTokenizer
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="<|endoftext|>" , SCREAMING_SNAKE_CASE__="<|endoftext|>" , SCREAMING_SNAKE_CASE__="<|endoftext|>" , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
if kwargs.pop('''add_bos_token''' , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[Any] = kwargs.pop('''name_or_path''' , '''''' )
raise ValueError(
'''Currently GPT2\'s fast tokenizer does NOT support adding a BOS token. '''
'''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'''
f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
'''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'''
''' so that the fast tokenizer works correctly.''' )
lowercase : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
lowercase : int = getattr(SCREAMING_SNAKE_CASE__ , pre_tok_state.pop('''type''' ) )
lowercase : Union[str, Any] = add_prefix_space
lowercase : Dict = pre_tok_class(**SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = add_prefix_space
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : List[Any] = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : Union[str, Any] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
lowercase : List[str] = super().decode(
token_ids=SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
if truncate_before_pattern is not None and len(SCREAMING_SNAKE_CASE__ ) > 0:
lowercase : Optional[Any] = self.truncate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return decoded_text
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
def find_re(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : int = pattern.search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return m.start() if m else -1
lowercase : Dict = [re.compile(SCREAMING_SNAKE_CASE__ , re.MULTILINE ) for pattern in truncate_before_pattern]
lowercase : List[Any] = list(re.finditer('''^print''' , SCREAMING_SNAKE_CASE__ , re.MULTILINE ) )
if len(SCREAMING_SNAKE_CASE__ ) > 1:
lowercase : Any = completion[: prints[1].start()]
lowercase : Optional[Any] = list(re.finditer('''^def''' , SCREAMING_SNAKE_CASE__ , re.MULTILINE ) )
if len(SCREAMING_SNAKE_CASE__ ) > 1:
lowercase : Union[str, Any] = completion[: defs[1].start()]
lowercase : Any = 0
lowercase : Optional[int] = [
pos for pos in [find_re(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for terminal in terminals] if pos != -1
]
if len(SCREAMING_SNAKE_CASE__ ) > 0:
return completion[: min(SCREAMING_SNAKE_CASE__ )]
else:
return completion
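# Usage note: truncate_before_pattern lets decode() cut a generated completion
# at natural stopping points (the patterns below are illustrative):
#
#   tokenizer.decode(generated_ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])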
| 337 |
from typing import List
from .keymap import KEYMAP, get_character
def __lowercase ( _UpperCamelCase ) ->int:
"""simple docstring"""
def decorator(_UpperCamelCase ):
lowercase : str = getattr(_UpperCamelCase, '''handle_key''', [] )
handle += [key]
setattr(_UpperCamelCase, '''handle_key''', _UpperCamelCase )
return func
return decorator
def __lowercase ( *_UpperCamelCase ) ->Any:
"""simple docstring"""
def decorator(_UpperCamelCase ):
lowercase : List[Any] = getattr(_UpperCamelCase, '''handle_key''', [] )
handle += keys
setattr(_UpperCamelCase, '''handle_key''', _UpperCamelCase )
return func
return decorator
class __SCREAMING_SNAKE_CASE ( A__ ):
def __new__( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : str = super().__new__(cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not hasattr(SCREAMING_SNAKE_CASE__ , '''key_handler''' ):
setattr(SCREAMING_SNAKE_CASE__ , '''key_handler''' , {} )
setattr(SCREAMING_SNAKE_CASE__ , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
lowercase : Dict = getattr(SCREAMING_SNAKE_CASE__ , '''handle_key''' , [] )
for key in handled_keys:
lowercase : List[Any] = value
return new_cls
@staticmethod
def __lowerCamelCase ( cls ):
lowercase : Dict = get_character()
if char != KEYMAP["undefined"]:
lowercase : Optional[int] = ord(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = cls.key_handler.get(SCREAMING_SNAKE_CASE__ )
if handler:
lowercase : Tuple = char
return handler(cls )
else:
return None
def __lowercase ( cls ) ->Any:
"""simple docstring"""
return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
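# Usage sketch (decorator names are hypothetical; the two decorators above
# register a single key and multiple keys respectively):
#
#   class Menu(metaclass=KeyHandler):
#       @mark("j")                           # hypothetical single-key decorator
#       def move_down(cls): ...
#
#       @mark_multiple("k", KEYMAP["up"])    # hypothetical multi-key decorator
#       def move_up(cls): ...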
| 337 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __SCREAMING_SNAKE_CASE :
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=32 * 8 , SCREAMING_SNAKE_CASE__=32 * 8 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=64 , ):
lowercase : int = parent
lowercase : Optional[int] = batch_size
lowercase : Optional[Any] = is_training
lowercase : Dict = use_auxiliary_loss
lowercase : Any = num_queries
lowercase : Dict = num_channels
lowercase : List[Any] = min_size
lowercase : List[Any] = max_size
lowercase : Optional[int] = num_labels
lowercase : Optional[int] = hidden_dim
lowercase : int = hidden_dim
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
SCREAMING_SNAKE_CASE__ )
lowercase : int = torch.ones([self.batch_size, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE__ ) > 0.5
).float()
lowercase : Union[str, Any] = (torch.rand((self.batch_size, self.num_labels) , device=SCREAMING_SNAKE_CASE__ ) > 0.5).long()
lowercase : Any = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __lowerCamelCase ( self ):
lowercase : int = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
lowercase : Dict = self.num_queries
lowercase : str = self.num_labels
lowercase : Any = [1, 1, 1, 1]
lowercase : Union[str, Any] = self.num_channels
lowercase : Dict = 64
lowercase : Dict = 128
lowercase : Tuple = self.hidden_dim
lowercase : Optional[Any] = self.hidden_dim
lowercase : List[Any] = self.hidden_dim
return config
def __lowerCamelCase ( self ):
lowercase , lowercase , lowercase , lowercase , lowercase : Tuple = self.prepare_config_and_inputs()
lowercase : Tuple = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : int = output.encoder_hidden_states
lowercase : int = output.pixel_decoder_hidden_states
lowercase : int = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) , config.decoder_layers )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
with torch.no_grad():
lowercase : List[str] = MaskaFormerModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowercase : Optional[int] = model(pixel_values=SCREAMING_SNAKE_CASE__ , pixel_mask=SCREAMING_SNAKE_CASE__ )
lowercase : str = model(SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : int = MaskaFormerForUniversalSegmentation(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
def comm_check_on_output(SCREAMING_SNAKE_CASE__ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowercase : Union[str, Any] = model(pixel_values=SCREAMING_SNAKE_CASE__ , pixel_mask=SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = model(SCREAMING_SNAKE_CASE__ )
comm_check_on_output(SCREAMING_SNAKE_CASE__ )
lowercase : str = model(
pixel_values=SCREAMING_SNAKE_CASE__ , pixel_mask=SCREAMING_SNAKE_CASE__ , mask_labels=SCREAMING_SNAKE_CASE__ , class_labels=SCREAMING_SNAKE_CASE__ )
comm_check_on_output(SCREAMING_SNAKE_CASE__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
A : Tuple = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
A : List[str] = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
A : Union[str, Any] = False
A : Union[str, Any] = False
A : Optional[int] = False
A : Optional[int] = False
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = MaskaFormerModelTester(self )
lowercase : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
lowercase , lowercase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def __lowerCamelCase ( self ):
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def __lowerCamelCase ( self ):
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def __lowerCamelCase ( self ):
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def __lowerCamelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __lowerCamelCase ( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
lowercase , lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : str = model_class(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : Optional[Any] = [*signature.parameters.keys()]
lowercase : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
@slow
def __lowerCamelCase ( self ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
lowercase : List[str] = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Tuple = (self.model_tester.min_size,) * 2
lowercase : List[Any] = {
'''pixel_values''': torch.randn((2, 3, *size) , device=SCREAMING_SNAKE_CASE__ ),
'''mask_labels''': torch.randn((2, 10, *size) , device=SCREAMING_SNAKE_CASE__ ),
'''class_labels''': torch.zeros(2 , 10 , device=SCREAMING_SNAKE_CASE__ ).long(),
}
lowercase : Any = self.model_tester.get_config()
lowercase : Any = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
lowercase : Any = model(**SCREAMING_SNAKE_CASE__ )
self.assertTrue(outputs.loss is not None )
def __lowerCamelCase ( self ):
lowercase , lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase , lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Optional[int] = model_class(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = model(**SCREAMING_SNAKE_CASE__ , output_attentions=SCREAMING_SNAKE_CASE__ )
self.assertTrue(outputs.attentions is not None )
def __lowerCamelCase ( self ):
if not self.model_tester.is_training:
return
lowercase : Optional[int] = self.all_model_classes[1]
lowercase , lowercase , lowercase , lowercase , lowercase : int = self.model_tester.prepare_config_and_inputs()
lowercase : List[str] = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.train()
lowercase : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ , mask_labels=SCREAMING_SNAKE_CASE__ , class_labels=SCREAMING_SNAKE_CASE__ ).loss
loss.backward()
def __lowerCamelCase ( self ):
lowercase : Optional[int] = self.all_model_classes[1]
lowercase , lowercase , lowercase , lowercase , lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
lowercase : Union[str, Any] = True
lowercase : Any = True
lowercase : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
model.train()
lowercase : Any = model(SCREAMING_SNAKE_CASE__ , mask_labels=SCREAMING_SNAKE_CASE__ , class_labels=SCREAMING_SNAKE_CASE__ )
lowercase : int = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowercase : Any = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
lowercase : Dict = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowercase : str = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__a = 1e-4
def __lowercase ( ) ->Optional[int]:
"""simple docstring"""
lowercase : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __lowerCamelCase ( self ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __lowerCamelCase ( self ):
lowercase : List[str] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE__ )
lowercase : str = self.default_image_processor
lowercase : Dict = prepare_img()
lowercase : List[Any] = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE__ )
lowercase : Any = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE__ , (1, 3, 384, 384) )
with torch.no_grad():
lowercase : Any = model(**SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=SCREAMING_SNAKE_CASE__ ) )
lowercase : Union[str, Any] = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=SCREAMING_SNAKE_CASE__ ) )
lowercase : Optional[int] = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=SCREAMING_SNAKE_CASE__ ) )
def __lowerCamelCase ( self ):
lowercase : Tuple = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE__ ).eval()
lowercase : int = self.default_image_processor
lowercase : Dict = prepare_img()
lowercase : int = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE__ , (1, 3, 384, 384) )
with torch.no_grad():
lowercase : Tuple = model(**SCREAMING_SNAKE_CASE__ )
# masks_queries_logits
lowercase : int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
lowercase : Optional[int] = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
lowercase : Dict = torch.tensor(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=SCREAMING_SNAKE_CASE__ ) )
# class_queries_logits
lowercase : Optional[Any] = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
lowercase : str = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=SCREAMING_SNAKE_CASE__ ) )
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE__ ).eval()
lowercase : Union[str, Any] = self.default_image_processor
lowercase : Dict = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
lowercase : Any = inputs['''pixel_values'''].to(SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = [el.to(SCREAMING_SNAKE_CASE__ ) for el in inputs['''mask_labels''']]
lowercase : Optional[Any] = [el.to(SCREAMING_SNAKE_CASE__ ) for el in inputs['''class_labels''']]
with torch.no_grad():
lowercase : Optional[int] = model(**SCREAMING_SNAKE_CASE__ )
self.assertTrue(outputs.loss is not None )
| 337 |
import logging
import os
from .state import PartialState
class __SCREAMING_SNAKE_CASE ( logging.LoggerAdapter ):
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ ):
lowercase : List[Any] = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
lowercase : List[str] = kwargs.pop('''main_process_only''' , SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = kwargs.pop('''in_order''' , SCREAMING_SNAKE_CASE__ )
if self.isEnabledFor(SCREAMING_SNAKE_CASE__ ):
if self._should_log(SCREAMING_SNAKE_CASE__ ):
lowercase , lowercase : str = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
elif in_order:
lowercase : List[Any] = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
lowercase , lowercase : Union[str, Any] = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
state.wait_for_everyone()
def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->List[Any]:
"""simple docstring"""
if log_level is None:
lowercase : str = os.environ.get('''ACCELERATE_LOG_LEVEL''', _UpperCamelCase )
lowercase : str = logging.getLogger(_UpperCamelCase )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(_UpperCamelCase, {} )
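# A minimal usage sketch, assuming the factory above maps to accelerate's
# ``get_logger``; the keyword arguments are exactly the ones the adapter pops
# inside its ``log`` override:
#
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("emitted once, on the main process", main_process_only=True)
#   logger.info("emitted by every rank, in rank order", in_order=True)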
| 337 | 1 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def __lowercase ( ) ->int:
"""simple docstring"""
lowercase : Tuple = HfArgumentParser(_UpperCamelCase )
lowercase : List[str] = parser.parse_args_into_dataclasses()[0]
lowercase : Optional[int] = TensorFlowBenchmark(args=_UpperCamelCase )
try:
lowercase : Any = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowercase : Optional[int] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
lowercase : Any = ''' '''.join(str(_UpperCamelCase ).split(''' ''' )[:-1] )
lowercase : Any = ''''''
lowercase : str = eval(str(_UpperCamelCase ).split(''' ''' )[-1] )
lowercase : List[str] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
lowercase : Union[str, Any] = full_error_msg + begin_error_msg + str(_UpperCamelCase )
raise ValueError(_UpperCamelCase )
benchmark.run()
if __name__ == "__main__":
main()
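# Note on the fallback above: a (hypothetical) legacy flag such as ``--no_speed``
# hits the message template and is rejected with a hint to pass ``--no-speed``
# instead; any other unrecognized flag is collected into ``wrong_args`` and
# re-raised as part of the ValueError.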
| 337 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class __SCREAMING_SNAKE_CASE ( pl.LightningModule ):
def __init__( self , SCREAMING_SNAKE_CASE__ ):
super().__init__()
lowercase : Any = model
lowercase : Optional[Any] = 2
lowercase : Optional[int] = nn.Linear(self.model.config.hidden_size , self.num_labels )
def __lowerCamelCase ( self ):
pass
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
lowercase : str = LongformerModel.from_pretrained(_UpperCamelCase )
lowercase : int = LightningModel(_UpperCamelCase )
lowercase : Union[str, Any] = torch.load(_UpperCamelCase, map_location=torch.device('''cpu''' ) )
lightning_model.load_state_dict(ckpt['''state_dict'''] )
# init longformer question answering model
lowercase : List[Any] = LongformerForQuestionAnswering.from_pretrained(_UpperCamelCase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_UpperCamelCase )
print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__a = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
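# Example invocation (the script name and paths are placeholders; the flags and
# the model identifier come from the argparse definitions above):
#
#   python convert_longformer_qa.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoint.ckpt \
#       --pytorch_dump_folder_path ./longformer-qa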
| 337 | 1 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __lowercase ( ) ->int:
"""simple docstring"""
lowercase : Any = {
'''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
'''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
'''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
}
lowercase : Optional[Any] = Dataset.from_dict(_UpperCamelCase )
return dataset
class __SCREAMING_SNAKE_CASE ( A__ ):
def __lowerCamelCase ( self ):
lowercase : List[Any] = get_dataset()
lowercase : Optional[int] = make_duplicate_clusters(SCREAMING_SNAKE_CASE__ , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def __lowerCamelCase ( self ):
lowercase : int = get_dataset()
lowercase , lowercase : Optional[Any] = deduplicate_dataset(SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 2 )
print(SCREAMING_SNAKE_CASE__ )
self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2 )
self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , SCREAMING_SNAKE_CASE__ )
| 337 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Any = 'yolos'
def __init__( self , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=[512, 864] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=100 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = hidden_size
lowercase : int = num_hidden_layers
lowercase : str = num_attention_heads
lowercase : str = intermediate_size
lowercase : Dict = hidden_act
lowercase : int = hidden_dropout_prob
lowercase : Optional[Any] = attention_probs_dropout_prob
lowercase : List[Any] = initializer_range
lowercase : Optional[int] = layer_norm_eps
lowercase : str = image_size
lowercase : Dict = patch_size
lowercase : str = num_channels
lowercase : Optional[int] = qkv_bias
lowercase : List[str] = num_detection_tokens
lowercase : List[str] = use_mid_position_embeddings
lowercase : Dict = auxiliary_loss
# Hungarian matcher
lowercase : Optional[Any] = class_cost
lowercase : Any = bbox_cost
lowercase : int = giou_cost
# Loss coefficients
lowercase : Dict = bbox_loss_coefficient
lowercase : Optional[Any] = giou_loss_coefficient
lowercase : Tuple = eos_coefficient
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = version.parse('1.11' )
@property
def __lowerCamelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCamelCase ( self ):
return 1E-4
@property
def __lowerCamelCase ( self ):
return 12
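# Reading the ONNX config above: export targets opset 12, numerical validation
# uses an absolute tolerance of 1e-4, and the exported graph takes a single
# dynamic ``pixel_values`` input laid out as (batch, num_channels, height, width).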
| 337 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__a = logging.getLogger(__name__)
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Optional[int]:
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class __SCREAMING_SNAKE_CASE :
A : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
A : Optional[str] = field(
default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
A : Optional[str] = field(
default=A__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
A : Optional[str] = field(
default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
A : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
A : str = field(metadata={'help': 'Should contain the data files for the task.'} )
A : int = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A : bool = field(
default=A__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def __lowercase ( ) ->int:
"""simple docstring"""
lowercase : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowercase , lowercase , lowercase : List[str] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''', _UpperCamelCase )
# Set seed
set_seed(training_args.seed )
try:
lowercase : Any = processors[data_args.task_name]()
lowercase : Union[str, Any] = processor.get_labels()
lowercase : Tuple = len(_UpperCamelCase )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=_UpperCamelCase, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
lowercase : Optional[int] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
lowercase : Dict = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=_UpperCamelCase, cache_dir=model_args.cache_dir, )
# Get datasets
lowercase : Dict = (
MultipleChoiceDataset(
data_dir=data_args.data_dir, tokenizer=_UpperCamelCase, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
if training_args.do_train
else None
)
lowercase : Union[str, Any] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir, tokenizer=_UpperCamelCase, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
if training_args.do_eval
else None
)
def compute_metrics(_UpperCamelCase ) -> Dict:
lowercase : List[Any] = np.argmax(p.predictions, axis=1 )
return {"acc": simple_accuracy(_UpperCamelCase, p.label_ids )}
# Data collator
lowercase : Union[str, Any] = DataCollatorWithPadding(_UpperCamelCase, pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
lowercase : List[str] = Trainer(
model=_UpperCamelCase, args=_UpperCamelCase, train_dataset=_UpperCamelCase, eval_dataset=_UpperCamelCase, compute_metrics=_UpperCamelCase, data_collator=_UpperCamelCase, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowercase : Optional[Any] = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase : Union[str, Any] = trainer.evaluate()
lowercase : Optional[Any] = os.path.join(training_args.output_dir, '''eval_results.txt''' )
if trainer.is_world_master():
with open(_UpperCamelCase, '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''', _UpperCamelCase, _UpperCamelCase )
writer.write('''%s = %s\n''' % (key, value) )
results.update(_UpperCamelCase )
return results
def __lowercase ( _UpperCamelCase ) ->Tuple:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
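# Example launch, assembled from the dataclass fields above plus standard
# ``TrainingArguments`` flags (the model id, task name, and paths are placeholders):
#
#   python run_multiple_choice.py \
#       --model_name_or_path bert-base-uncased \
#       --task_name swag \
#       --data_dir ./data \
#       --max_seq_length 128 \
#       --output_dir ./out \
#       --do_train --do_eval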
| 337 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__a = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Optional[int]:
"""simple docstring"""
if got_ver is None or want_ver is None:
raise ValueError(
f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
f""" reinstalling {pkg}.""" )
if not ops[op](version.parse(_UpperCamelCase ), version.parse(_UpperCamelCase ) ):
raise ImportError(
f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->None:
"""simple docstring"""
lowercase : List[Any] = f"""\n{hint}""" if hint is not None else ''''''
# non-versioned check
if re.match(R'''^[\w_\-\d]+$''', _UpperCamelCase ):
lowercase , lowercase , lowercase : Optional[Any] = requirement, None, None
else:
lowercase : List[Any] = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''', _UpperCamelCase )
if not match:
raise ValueError(
            '''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but'''
f""" got {requirement}""" )
lowercase , lowercase : str = match[0]
lowercase : Tuple = want_full.split(''',''' ) # there could be multiple requirements
lowercase : List[Any] = {}
for w in want_range:
lowercase : str = re.findall(R'''^([\s!=<>]{1,2})(.+)''', _UpperCamelCase )
if not match:
raise ValueError(
                '''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,'''
f""" but got {requirement}""" )
lowercase , lowercase : Optional[int] = match[0]
lowercase : Dict = want_ver
if op not in ops:
raise ValueError(f"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
# special case
if pkg == "python":
lowercase : int = '''.'''.join([str(_UpperCamelCase ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
return
# check if any version is installed
try:
lowercase : List[str] = importlib.metadata.version(_UpperCamelCase )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
def __lowercase ( _UpperCamelCase ) ->int:
"""simple docstring"""
lowercase : Optional[int] = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
return require_version(_UpperCamelCase, _UpperCamelCase )
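# Requirement strings the parser above accepts (package names are illustrative;
# the comma-separated form exercises the multi-constraint branch):
#
#   "numpy"                               -> presence check only
#   "numpy>=1.17"                         -> a single version constraint
#   "tokenizers>=0.11.1,!=0.11.3,<0.13"   -> several constraints, all must hold
#   "python>=3.7"                         -> special-cased against sys.version_info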
| 337 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : str = 'cvt'
def __init__( self , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=[7, 3, 3] , SCREAMING_SNAKE_CASE__=[4, 2, 2] , SCREAMING_SNAKE_CASE__=[2, 1, 1] , SCREAMING_SNAKE_CASE__=[64, 192, 384] , SCREAMING_SNAKE_CASE__=[1, 3, 6] , SCREAMING_SNAKE_CASE__=[1, 2, 10] , SCREAMING_SNAKE_CASE__=[4.0, 4.0, 4.0] , SCREAMING_SNAKE_CASE__=[0.0, 0.0, 0.0] , SCREAMING_SNAKE_CASE__=[0.0, 0.0, 0.0] , SCREAMING_SNAKE_CASE__=[0.0, 0.0, 0.1] , SCREAMING_SNAKE_CASE__=[True, True, True] , SCREAMING_SNAKE_CASE__=[False, False, True] , SCREAMING_SNAKE_CASE__=["dw_bn", "dw_bn", "dw_bn"] , SCREAMING_SNAKE_CASE__=[3, 3, 3] , SCREAMING_SNAKE_CASE__=[1, 1, 1] , SCREAMING_SNAKE_CASE__=[2, 2, 2] , SCREAMING_SNAKE_CASE__=[1, 1, 1] , SCREAMING_SNAKE_CASE__=[1, 1, 1] , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = num_channels
lowercase : Union[str, Any] = patch_sizes
lowercase : List[Any] = patch_stride
lowercase : Dict = patch_padding
lowercase : Tuple = embed_dim
lowercase : List[str] = num_heads
lowercase : Dict = depth
lowercase : Optional[Any] = mlp_ratio
lowercase : str = attention_drop_rate
lowercase : List[str] = drop_rate
lowercase : Optional[Any] = drop_path_rate
lowercase : str = qkv_bias
lowercase : int = cls_token
lowercase : Optional[int] = qkv_projection_method
lowercase : Optional[Any] = kernel_qkv
lowercase : Tuple = padding_kv
lowercase : Optional[Any] = stride_kv
lowercase : Dict = padding_q
lowercase : List[Any] = stride_q
lowercase : str = initializer_range
lowercase : str = layer_norm_eps
| 337 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__a = logging.get_logger(__name__)
__a = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = 'deta'
A : Dict = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=900 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=1024 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="relu" , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__="sine" , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=300 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.25 , **SCREAMING_SNAKE_CASE__ , ):
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowercase : Tuple = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
else:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = backbone_config.pop('''model_type''' )
lowercase : Any = CONFIG_MAPPING[backbone_model_type]
lowercase : List[Any] = config_class.from_dict(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = backbone_config
lowercase : Union[str, Any] = num_queries
lowercase : Any = max_position_embeddings
lowercase : int = d_model
lowercase : Any = encoder_ffn_dim
lowercase : Optional[int] = encoder_layers
lowercase : Tuple = encoder_attention_heads
lowercase : Optional[Any] = decoder_ffn_dim
lowercase : Optional[int] = decoder_layers
lowercase : int = decoder_attention_heads
lowercase : Any = dropout
lowercase : int = attention_dropout
lowercase : Dict = activation_dropout
lowercase : int = activation_function
lowercase : Dict = init_std
lowercase : List[str] = init_xavier_std
lowercase : Optional[Any] = encoder_layerdrop
lowercase : Tuple = auxiliary_loss
lowercase : Tuple = position_embedding_type
# deformable attributes
lowercase : List[str] = num_feature_levels
lowercase : Tuple = encoder_n_points
lowercase : Optional[int] = decoder_n_points
lowercase : Tuple = two_stage
lowercase : Optional[Any] = two_stage_num_proposals
lowercase : Union[str, Any] = with_box_refine
lowercase : Any = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
lowercase : Optional[Any] = class_cost
lowercase : str = bbox_cost
lowercase : List[Any] = giou_cost
# Loss coefficients
lowercase : Tuple = mask_loss_coefficient
lowercase : Any = dice_loss_coefficient
lowercase : Dict = bbox_loss_coefficient
lowercase : Tuple = giou_loss_coefficient
lowercase : Union[str, Any] = eos_coefficient
lowercase : Tuple = focal_alpha
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def __lowerCamelCase ( self ):
return self.encoder_attention_heads
@property
def __lowerCamelCase ( self ):
return self.d_model
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase : Any = self.backbone_config.to_dict()
lowercase : List[str] = self.__class__.model_type
return output
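# Serialization sketch for the ``to_dict`` override above, assuming the class
# corresponds to upstream ``DetaConfig``:
#
#   config = DetaConfig()                  # defaults to a ResNet backbone
#   d = config.to_dict()
#   d["backbone_config"]["model_type"]     # "resnet" -- serialized as a plain dict
#   d["model_type"]                        # "deta"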
| 337 | 1 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__a = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __lowercase ( _UpperCamelCase ) ->List[Any]:
"""simple docstring"""
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_UpperCamelCase )
def __lowercase ( _UpperCamelCase ) ->Dict:
"""simple docstring"""
from transformers.testing_utils import pytest_terminal_summary_main
lowercase : Dict = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(_UpperCamelCase, id=_UpperCamelCase )
| 337 |
def __lowercase ( ) ->List[Any]:
"""simple docstring"""
lowercase : Union[str, Any] = 0
for i in range(1, 1001 ):
total += i**i
return str(_UpperCamelCase )[-10:]
if __name__ == "__main__":
print(solution())
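
# Only the last ten digits matter, so the same string can be produced with
# modular exponentiation, never materializing the full big-integer powers;
# a minimal equivalent sketch:
def solution_mod() -> str:
    # pow(i, i, 10**10) keeps every partial result below 10**10
    total = sum(pow(i, i, 10**10) for i in range(1, 1001)) % 10**10
    return str(total).zfill(10)  # keep any leading zeros, matching [-10:]

assert solution_mod() == solution()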
| 337 | 1 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class __SCREAMING_SNAKE_CASE ( A__ , A__ ):
@register_to_config
def __init__( self , SCREAMING_SNAKE_CASE__ = 768 , ):
super().__init__()
lowercase : str = nn.Parameter(torch.zeros(1 , SCREAMING_SNAKE_CASE__ ) )
lowercase : Tuple = nn.Parameter(torch.ones(1 , SCREAMING_SNAKE_CASE__ ) )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ):
lowercase : List[Any] = nn.Parameter(self.mean.to(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) )
lowercase : int = nn.Parameter(self.std.to(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) )
return self
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = (embeds - self.mean) * 1.0 / self.std
return embeds
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[int] = (embeds * self.std) + self.mean
return embeds
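    # Note: the two methods above are exact inverses -- (x - mean) / std followed
    # by x * std + mean restores the input up to floating-point error.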
| 337 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__a = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self ):
lowercase : str = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
lowercase : Any = self.diffusers_dir
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE__ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def __lowerCamelCase ( self ):
lowercase : List[Any] = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
lowercase : Tuple = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowercase : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
lowercase : Any = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
lowercase : List[Any] = black.format_str(SCREAMING_SNAKE_CASE__ , mode=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' , newline='''\n''' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(SCREAMING_SNAKE_CASE__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f:
self.assertTrue(f.read() , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Tuple = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , )
# Copy consistency with a really long name
lowercase : List[Any] = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , overwrite_result=re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , )
| 337 | 1 |
import copy
import re
class __SCREAMING_SNAKE_CASE :
A : str = 'hp'
A : int = {}
A : List[Any] = None
@classmethod
def __lowerCamelCase ( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = prefix
lowercase : Dict = defaults
cls.build_naming_info()
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return ""
lowercase : List[Any] = None
if any(char.isdigit() for char in word ):
raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(SCREAMING_SNAKE_CASE__ ) + 1 ):
lowercase : Dict = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
lowercase : str = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(SCREAMING_SNAKE_CASE__ ):
lowercase : int = ''''''
while integer != 0:
lowercase : Optional[int] = chr(ord('''A''' ) + integer % 10 ) + s
integer //= 10
return s
lowercase : List[str] = 0
while True:
lowercase : Any = word + '''#''' + int_to_alphabetic(SCREAMING_SNAKE_CASE__ )
if sword in info["reverse_short_word"]:
continue
else:
lowercase : Union[str, Any] = sword
break
lowercase : Dict = short_word
lowercase : Optional[int] = word
return short_word
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Union[str, Any] = param_name.split('''_''' )
lowercase : Any = [TrialShortNamer.shortname_for_word(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for word in words]
        # We try to create a separatorless short name, but if there is a
        # collision we have to fall back to a separated short name
lowercase : Optional[Any] = ['''''', '''_''']
for separator in separators:
lowercase : Any = separator.join(SCREAMING_SNAKE_CASE__ )
if shortname not in info["reverse_short_param"]:
lowercase : List[Any] = shortname
lowercase : Any = param_name
return shortname
return param_name
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = TrialShortNamer.shortname_for_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Dict = short_name
lowercase : int = param_name
@classmethod
def __lowerCamelCase ( cls ):
if cls.NAMING_INFO is not None:
return
lowercase : Tuple = {
'''short_word''': {},
'''reverse_short_word''': {},
'''short_param''': {},
'''reverse_short_param''': {},
}
lowercase : Optional[int] = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = info
@classmethod
def __lowerCamelCase ( cls , SCREAMING_SNAKE_CASE__ ):
cls.build_naming_info()
assert cls.PREFIX is not None
lowercase : List[Any] = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
lowercase : str = cls.NAMING_INFO['''short_param'''][k]
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[Any] = 1 if v else 0
lowercase : str = '''''' if isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ) else '''-'''
lowercase : Optional[Any] = f"""{key}{sep}{v}"""
name.append(SCREAMING_SNAKE_CASE__ )
return "_".join(SCREAMING_SNAKE_CASE__ )
@classmethod
def __lowerCamelCase ( cls , SCREAMING_SNAKE_CASE__ ):
lowercase : int = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
lowercase : List[Any] = []
else:
lowercase : Any = repr.split('''_''' )
lowercase : List[str] = {}
for value in values:
if "-" in value:
lowercase , lowercase : Tuple = value.split('''-''' )
else:
lowercase : Tuple = re.sub('''[0-9.]''' , '''''' , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = float(re.sub('''[^0-9.]''' , '''''' , SCREAMING_SNAKE_CASE__ ) )
lowercase : List[str] = cls.NAMING_INFO['''reverse_short_param'''][p_k]
lowercase : Dict = p_v
for k in cls.DEFAULTS:
if k not in parameters:
lowercase : Dict = cls.DEFAULTS[k]
return parameters
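# A hand-worked sketch of the scheme above, assuming the classmethods map to
# upstream ``set_defaults`` / ``shortname`` / ``parse_repr``:
#
#   class RunNamer(TrialShortNamer):
#       PREFIX = "hp"
#       DEFAULTS = {"learning_rate": 1e-3, "seed": 42}
#
#   RunNamer.shortname({"learning_rate": 3e-4, "seed": 42})
#   # -> "hp_lr0.0003"  ("seed" matches its default and is dropped; numeric
#   #                    values are joined to the short key with no separator)
#   RunNamer.parse_repr("hp_lr0.0003")
#   # -> {"learning_rate": 0.0003, "seed": 42}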
| 337 |
import math
class __SCREAMING_SNAKE_CASE :
def __init__( self , SCREAMING_SNAKE_CASE__=0 ): # a graph with Node 0,1,...,N-1
lowercase : List[Any] = n
lowercase : List[Any] = [
[math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ )
] # adjacency matrix for weight
lowercase : Union[str, Any] = [
[math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ )
] # dp[i][j] stores minimum distance from i to j
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : int = w
def __lowerCamelCase ( self ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
lowercase : Any = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return self.dp[u][v]
if __name__ == "__main__":
__a = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
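    # Hand-checked against the edges above: shortest 1 -> 4 is 1 -> 3 -> 4
    # with cost 5 + 6 = 11, and shortest 0 -> 3 is 0 -> 2 -> 3 with cost
    # 9 + 7 = 16, matching the two printed values.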
| 337 | 1 |
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
__a = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class __SCREAMING_SNAKE_CASE :
def __init__( self , SCREAMING_SNAKE_CASE__ = 14 ):
if group not in primes:
raise ValueError('''Unsupported Group''' )
lowercase : Any = primes[group]['''prime''']
lowercase : List[str] = primes[group]['''generator''']
lowercase : Tuple = int(hexlify(urandom(32 ) ) , base=16 )
def __lowerCamelCase ( self ):
return hex(self.__private_key )[2:]
def __lowerCamelCase ( self ):
lowercase : Optional[int] = pow(self.generator , self.__private_key , self.prime )
return hex(SCREAMING_SNAKE_CASE__ )[2:]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(SCREAMING_SNAKE_CASE__ , (self.prime - 1) // 2 , self.prime ) == 1
)
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = int(SCREAMING_SNAKE_CASE__ , base=16 )
if not self.is_valid_public_key(SCREAMING_SNAKE_CASE__ ):
raise ValueError('''Invalid public key''' )
lowercase : Optional[int] = pow(SCREAMING_SNAKE_CASE__ , self.__private_key , self.prime )
return shaaaa(str(SCREAMING_SNAKE_CASE__ ).encode() ).hexdigest()
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(SCREAMING_SNAKE_CASE__ , (prime - 1) // 2 , SCREAMING_SNAKE_CASE__ ) == 1
)
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 14 ):
lowercase : Any = int(SCREAMING_SNAKE_CASE__ , base=16 )
lowercase : Dict = int(SCREAMING_SNAKE_CASE__ , base=16 )
lowercase : Dict = primes[group]['''prime''']
if not DiffieHellman.is_valid_public_key_static(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
raise ValueError('''Invalid public key''' )
lowercase : Union[str, Any] = pow(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return shaaaa(str(SCREAMING_SNAKE_CASE__ ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
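    # Protocol sketch, assuming the class above matches the upstream
    # ``DiffieHellman`` API (``generate_public_key`` / ``generate_shared_key``):
    #
    #   alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
    #   shared_a = alice.generate_shared_key(bob.generate_public_key())
    #   shared_b = bob.generate_shared_key(alice.generate_public_key())
    #   assert shared_a == shared_b   # both sides derive the same SHA-256 digest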
| 337 |
from __future__ import annotations
def __lowercase ( _UpperCamelCase ) ->float:
"""simple docstring"""
if not nums:
raise ValueError('''List is empty''' )
return sum(_UpperCamelCase ) / len(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 1 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self ):
super().__init__()
lowercase : List[str] = nn.Linear(3 , 4 )
lowercase : Optional[Any] = nn.BatchNormad(4 )
lowercase : Optional[int] = nn.Linear(4 , 5 )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self.lineara(self.batchnorm(self.lineara(SCREAMING_SNAKE_CASE__ ) ) )
class __SCREAMING_SNAKE_CASE ( A__ ):
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
return (args[0] + 1,) + args[1:], kwargs
class __SCREAMING_SNAKE_CASE ( A__ ):
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return output + 1
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self ):
lowercase : List[str] = ModelForTest()
lowercase : Dict = ModelHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(test_model._hf_hook , SCREAMING_SNAKE_CASE__ )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(SCREAMING_SNAKE_CASE__ )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ , '''_hf_hook''' ) )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ , '''_old_forward''' ) )
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = ModelForTest()
lowercase : Optional[Any] = ModelHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , append=SCREAMING_SNAKE_CASE__ )
self.assertEqual(isinstance(test_model._hf_hook , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(SCREAMING_SNAKE_CASE__ )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ , '''_hf_hook''' ) )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ , '''_old_forward''' ) )
def __lowerCamelCase ( self ):
lowercase : Optional[int] = ModelForTest()
lowercase : List[str] = torch.randn(2 , 3 )
lowercase : Dict = test_model(x + 1 )
lowercase : Union[str, Any] = test_model(x + 2 )
lowercase : Union[str, Any] = PreForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowercase : List[Any] = PreForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowercase : Optional[int] = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Dict = test_model(SCREAMING_SNAKE_CASE__ )
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-5 )
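        # As exercised above: ``add_hook_to_module`` replaces an existing hook
        # rather than chaining it, so composition must go through
        # ``SequentialHook(first_hook, second_hook)`` (placeholder hook names).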
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = ModelForTest()
lowercase : Union[str, Any] = torch.randn(2 , 3 )
lowercase : List[Any] = test_model(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowercase : Tuple = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowercase : Any = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = test_model(SCREAMING_SNAKE_CASE__ )
assert torch.allclose(SCREAMING_SNAKE_CASE__ , output + 2 , atol=1E-5 )
def __lowerCamelCase ( self ):
lowercase : Tuple = ModelForTest()
lowercase : Optional[int] = torch.randn(2 , 3 )
lowercase : List[str] = test_model(SCREAMING_SNAKE_CASE__ )
lowercase : str = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
lowercase : Optional[Any] = True
lowercase : int = test_model(SCREAMING_SNAKE_CASE__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def __lowerCamelCase ( self ):
lowercase : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
lowercase : Tuple = torch.randn(2 , 3 )
lowercase : Tuple = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , torch.device(1 ) )
        # We can add a general hook to put the output back on the same device as the input.
add_hook_to_module(SCREAMING_SNAKE_CASE__ , AlignDevicesHook(io_same_device=SCREAMING_SNAKE_CASE__ ) )
lowercase : List[str] = torch.randn(2 , 3 ).to(0 )
lowercase : Optional[int] = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , torch.device(0 ) )
def __lowerCamelCase ( self ):
lowercase : List[str] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
lowercase : str = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
lowercase : str = torch.device(hook_kwargs['''execution_device'''] )
self.assertEqual(model.batchnorm.running_mean.device , SCREAMING_SNAKE_CASE__ )
lowercase : Dict = torch.randn(2 , 3 )
lowercase : Optional[int] = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
lowercase : Optional[int] = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
lowercase : str = torch.randn(2 , 3 )
lowercase : List[Any] = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def __lowerCamelCase ( self ):
lowercase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
lowercase : Union[str, Any] = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(SCREAMING_SNAKE_CASE__ , execution_device=SCREAMING_SNAKE_CASE__ , offload=SCREAMING_SNAKE_CASE__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
lowercase : str = torch.device(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.batchnorm.running_mean.device , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = torch.randn(2 , 3 )
lowercase : List[Any] = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(SCREAMING_SNAKE_CASE__ , execution_device=SCREAMING_SNAKE_CASE__ , offload=SCREAMING_SNAKE_CASE__ , offload_buffers=SCREAMING_SNAKE_CASE__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
lowercase : int = torch.randn(2 , 3 )
lowercase : Dict = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def __lowerCamelCase ( self ):
lowercase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
lowercase : int = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
SCREAMING_SNAKE_CASE__ , execution_device=SCREAMING_SNAKE_CASE__ , offload=SCREAMING_SNAKE_CASE__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
lowercase : Tuple = torch.device(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.batchnorm.running_mean.device , SCREAMING_SNAKE_CASE__ )
lowercase : str = torch.randn(2 , 3 )
lowercase : List[str] = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
SCREAMING_SNAKE_CASE__ , execution_device=SCREAMING_SNAKE_CASE__ , offload=SCREAMING_SNAKE_CASE__ , weights_map=model.state_dict() , offload_buffers=SCREAMING_SNAKE_CASE__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
lowercase : Dict = torch.randn(2 , 3 )
lowercase : Optional[int] = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
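# A minimal standalone sketch of the offload behaviour exercised above
# (illustrative only, not part of the test suite; assumes `torch` and
# `accelerate` are installed, and the layer shape is arbitrary):
def _align_devices_usage_sketch():
    import torch
    from torch import nn
    from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_module
    layer = nn.Linear(3, 4)
    # Offloading moves the parameters to the meta device; they are streamed
    # back onto the execution device for every forward pass.
    add_hook_to_module(layer, AlignDevicesHook(execution_device='''cpu''', offload=True))
    assert layer.weight.device == torch.device('''meta''')
    _ = layer(torch.randn(2, 3))
    remove_hook_from_module(layer)  # restores the real weights onto the CPU
    assert layer.weight.device == torch.device('''cpu''')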
| 337 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__a = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
warnings.warn(
'''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DeiTImageProcessor instead.''' , SCREAMING_SNAKE_CASE__ , )
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
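# Hedged usage sketch: constructing the deprecated class emits the warning
# above and then behaves exactly like `DeiTImageProcessor` (assumes
# `transformers` with the vision extras installed):
def _deprecation_warning_sketch():
    import warnings
    from transformers import DeiTFeatureExtractor
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('''always''')
        DeiTFeatureExtractor()
    assert any('''DeiTImageProcessor''' in str(w.message) for w in caught)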
| 337 | 1 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__a = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
warnings.warn(
'''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DeiTImageProcessor instead.''' , SCREAMING_SNAKE_CASE__ , )
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 337 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
__a = logging.get_logger(__name__)
def __lowercase ( _UpperCamelCase ) ->List[int]:
"""simple docstring"""
if isinstance(_UpperCamelCase, np.ndarray ):
return list(tensor.shape )
lowercase : Optional[Any] = tf.shape(_UpperCamelCase )
if tensor.shape == tf.TensorShape(_UpperCamelCase ):
return dynamic
lowercase : Tuple = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(_UpperCamelCase )]
def __lowercase ( _UpperCamelCase, _UpperCamelCase = None, _UpperCamelCase = None ) ->tf.Tensor:
"""simple docstring"""
return tf.nn.softmax(logits=logits + 1e-9, axis=_UpperCamelCase, name=_UpperCamelCase )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=1e-5, _UpperCamelCase=-1 ) ->int:
"""simple docstring"""
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_UpperCamelCase, _UpperCamelCase ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
lowercase , lowercase : Union[str, Any] = tf.nn.moments(_UpperCamelCase, axes=[axis], keepdims=_UpperCamelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
lowercase : int = [1] * inputs.shape.rank
lowercase : Union[str, Any] = shape_list(_UpperCamelCase )[axis]
lowercase : List[str] = tf.reshape(_UpperCamelCase, _UpperCamelCase )
lowercase : Dict = tf.reshape(_UpperCamelCase, _UpperCamelCase )
# Compute layer normalization using the batch_normalization
# function.
lowercase : List[str] = tf.nn.batch_normalization(
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, offset=_UpperCamelCase, scale=_UpperCamelCase, variance_epsilon=_UpperCamelCase, )
return outputs
def __lowercase ( _UpperCamelCase, _UpperCamelCase=0, _UpperCamelCase=-1 ) ->List[Any]:
"""simple docstring"""
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
lowercase : Dict = tf.shape(_UpperCamelCase )
lowercase : Optional[Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
lowercase : List[str] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0 )
return tf.reshape(_UpperCamelCase, _UpperCamelCase )
def __lowercase ( _UpperCamelCase ) ->tf.Tensor:
"""simple docstring"""
if not isinstance(_UpperCamelCase, tf.Tensor ):
lowercase : Optional[Any] = tf.convert_to_tensor(_UpperCamelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
lowercase : Tuple = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
lowercase : List[Any] = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
lowercase : str = (
tf.cast(1, encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = "input_ids" ) ->None:
"""simple docstring"""
tf.debugging.assert_less(
_UpperCamelCase, tf.cast(_UpperCamelCase, dtype=tensor.dtype ), message=(
f"""The maximum value of {tensor_name} ({tf.math.reduce_max(_UpperCamelCase )}) must be smaller than the embedding """
f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
), )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
lowercase : List[Any] = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
lowercase : Optional[int] = [x for x in data if len(_UpperCamelCase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
f"""bytes: {bad_attributes}""" )
lowercase : Any = np.asarray(_UpperCamelCase )
lowercase : List[Any] = 1
lowercase : Tuple = np.array_split(_UpperCamelCase, _UpperCamelCase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
lowercase : Dict = np.array_split(_UpperCamelCase, _UpperCamelCase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(_UpperCamelCase ):
lowercase : Optional[int] = chunk_data
else:
lowercase : int = data
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->List[str]:
"""simple docstring"""
if name in group.attrs:
lowercase : str = [n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs[name]]
else:
lowercase : Optional[Any] = []
lowercase : List[str] = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def __lowercase ( _UpperCamelCase ) ->List[str]:
"""simple docstring"""
def _expand_single_ad_tensor(_UpperCamelCase ):
if isinstance(_UpperCamelCase, tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(_UpperCamelCase, axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor, _UpperCamelCase )
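# Quick illustrative checks for the helpers above, using their upstream names
# from `transformers.tf_utils` (an assumption -- every function in this file is
# shown with a placeholder name). Requires TensorFlow; shapes are arbitrary.
def _tf_utils_usage_sketch():
    import tensorflow as tf
    from transformers.tf_utils import flatten, shape_list, stable_softmax
    x = tf.random.uniform((2, 3, 4))
    assert shape_list(x) == [2, 3, 4]  # works on eager tensors and during tracing
    assert shape_list(flatten(x, start_dim=1)) == [2, 12]  # dims 1..-1 merged
    probs = stable_softmax(tf.zeros((2, 5)))  # softmax variant that is safe on XLA
    assert probs.shape == (2, 5)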
| 337 | 1 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : Optional[int] = CTRLTokenizer
A : Tuple = False
A : Union[str, Any] = False
def __lowerCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase : List[Any] = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
lowercase : int = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
lowercase : List[Any] = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
lowercase : Union[str, Any] = {'''unk_token''': '''<unk>'''}
lowercase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE__ ) )
def __lowerCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = '''adapt react readapt apt'''
lowercase : Dict = '''adapt react readapt apt'''
return input_text, output_text
def __lowerCamelCase ( self ):
lowercase : Optional[int] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase : Union[str, Any] = '''adapt react readapt apt'''
lowercase : Tuple = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
lowercase : Optional[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = tokens + [tokenizer.unk_token]
lowercase : List[Any] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
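# Standalone sketch of the same round trip against the real checkpoint (a
# download; the hub id may also appear namespaced as Salesforce/ctrl). The
# exact splits depend on the full merges table, so nothing is asserted here.
def _pretrained_ctrl_sketch():
    from transformers import CTRLTokenizer
    tokenizer = CTRLTokenizer.from_pretrained('''ctrl''')
    print(tokenizer.tokenize('''adapt react readapt apt'''))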
| 337 |
def __lowercase ( _UpperCamelCase = 4000000 ) ->int:
"""simple docstring"""
lowercase : int = []
lowercase , lowercase : str = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(_UpperCamelCase )
lowercase , lowercase : Dict = b, a + b
return sum(even_fibs )
if __name__ == "__main__":
print(F'''{solution() = }''')
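# An equivalent constant-space variant for comparison (a sketch, not part of
# the original solution): every third Fibonacci number is even, so the even
# terms alone satisfy E(k) = 4 * E(k-1) + E(k-2).
def solution_even_terms_only(n: int = 4000000) -> int:
    total, prev, curr = 0, 2, 8  # 2 and 8 are the first two even Fibonacci numbers
    if n >= 2:
        total += 2
    while curr <= n:
        total += curr
        prev, curr = curr, 4 * curr + prev
    return total  # 4613732 for the default limit, matching solution() above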
| 337 | 1 |
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=A__ ):
A : Tuple = ['torch', 'transformers', 'onnx']
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class __SCREAMING_SNAKE_CASE ( metaclass=A__ ):
A : Tuple = ['torch', 'transformers', 'onnx']
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class __SCREAMING_SNAKE_CASE ( metaclass=A__ ):
A : List[str] = ['torch', 'transformers', 'onnx']
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class __SCREAMING_SNAKE_CASE ( metaclass=A__ ):
A : Any = ['torch', 'transformers', 'onnx']
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class __SCREAMING_SNAKE_CASE ( metaclass=A__ ):
A : Union[str, Any] = ['torch', 'transformers', 'onnx']
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class __SCREAMING_SNAKE_CASE ( metaclass=A__ ):
A : Dict = ['torch', 'transformers', 'onnx']
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
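# The classes above all follow the "dummy object" pattern: importing them always
# succeeds, while any real use raises an informative ImportError naming the
# missing backends. A minimal re-implementation sketch of the metaclass side of
# that mechanism (illustrative names; the real pieces are the `DummyObject`
# metaclass and `requires_backends` in the corresponding utils module):
def _dummy_object_sketch():
    class _RequiresBackends(type):
        def __getattribute__(cls, key):
            if key.startswith('''_'''):  # leave dunder/private lookups alone
                return super().__getattribute__(key)
            raise ImportError(f'''{cls.__name__} requires torch, transformers and onnx''')
    class FakePipeline(metaclass=_RequiresBackends):
        pass
    try:
        FakePipeline.from_pretrained('''anything''')  # even classmethod access fails early
    except ImportError as err:
        print(err)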
| 337 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__a = logging.get_logger(__name__)
__a = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = 'perceiver'
def __init__( self , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=1280 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=26 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="kv" , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=262 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=56 , SCREAMING_SNAKE_CASE__=[368, 496] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=1920 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=[1, 16, 224, 224] , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : Any = num_latents
lowercase : Union[str, Any] = d_latents
lowercase : str = d_model
lowercase : int = num_blocks
lowercase : str = num_self_attends_per_block
lowercase : List[str] = num_self_attention_heads
lowercase : List[str] = num_cross_attention_heads
lowercase : int = qk_channels
lowercase : List[Any] = v_channels
lowercase : int = cross_attention_shape_for_attention
lowercase : Tuple = self_attention_widening_factor
lowercase : Dict = cross_attention_widening_factor
lowercase : Any = hidden_act
lowercase : Optional[Any] = attention_probs_dropout_prob
lowercase : Union[str, Any] = initializer_range
lowercase : Any = layer_norm_eps
lowercase : Any = use_query_residual
# masked language modeling attributes
lowercase : List[str] = vocab_size
lowercase : Dict = max_position_embeddings
# image classification attributes
lowercase : int = image_size
# flow attributes
lowercase : List[Any] = train_size
# multimodal autoencoding attributes
lowercase : List[Any] = num_frames
lowercase : Union[str, Any] = audio_samples_per_frame
lowercase : int = samples_per_patch
lowercase : Optional[int] = output_shape
class __SCREAMING_SNAKE_CASE ( A__ ):
@property
def __lowerCamelCase ( self ):
if self.task == "multiple-choice":
lowercase : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase : Dict = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''inputs''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
@property
def __lowerCamelCase ( self ):
return 1E-4
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 3 , SCREAMING_SNAKE_CASE__ = 40 , SCREAMING_SNAKE_CASE__ = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase : str = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase : Union[str, Any] = preprocessor.num_special_tokens_to_add(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE__ )
# Generate dummy inputs according to the computed batch and sequence lengths
lowercase : Optional[Any] = [''' '''.join(['''a'''] ) * seq_length] * batch_size
lowercase : Any = dict(preprocessor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) )
lowercase : Union[str, Any] = inputs.pop('''input_ids''' )
return inputs
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase : List[str] = compute_effective_axis_dimension(SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch )
lowercase : List[str] = self._generate_dummy_images(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = dict(preprocessor(images=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) )
lowercase : Union[str, Any] = inputs.pop('''pixel_values''' )
return inputs
else:
raise ValueError(
'''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
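# Hedged usage sketch for the two classes above via their upstream names
# (`PerceiverConfig` / `PerceiverOnnxConfig`, an assumption based on the file's
# contents); downloads the deepmind/language-perceiver tokenizer files.
def _perceiver_onnx_dummy_inputs_sketch():
    from transformers import PerceiverConfig, PerceiverTokenizer
    from transformers.models.perceiver.configuration_perceiver import PerceiverOnnxConfig
    onnx_config = PerceiverOnnxConfig(PerceiverConfig())
    tokenizer = PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''')
    dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=-1, seq_length=-1)
    print(sorted(dummy))  # expected: ['attention_mask', 'inputs'] -- note the renamed key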
| 337 | 1 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE :
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=99 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=36 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=512 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=None , ):
lowercase : str = parent
lowercase : int = batch_size
lowercase : Union[str, Any] = seq_length
lowercase : int = is_training
lowercase : Optional[int] = use_input_mask
lowercase : List[Any] = use_token_type_ids
lowercase : int = use_labels
lowercase : Dict = vocab_size
lowercase : str = embedding_size
lowercase : int = hidden_size
lowercase : Optional[Any] = num_hidden_layers
lowercase : Any = num_hidden_groups
lowercase : Tuple = num_attention_heads
lowercase : Tuple = intermediate_size
lowercase : Optional[int] = hidden_act
lowercase : Tuple = hidden_dropout_prob
lowercase : str = attention_probs_dropout_prob
lowercase : Tuple = max_position_embeddings
lowercase : Dict = type_vocab_size
lowercase : Any = type_sequence_label_size
lowercase : List[str] = initializer_range
lowercase : Optional[Any] = num_labels
lowercase : List[str] = num_choices
lowercase : Optional[Any] = scope
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : List[Any] = None
if self.use_input_mask:
lowercase : str = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Any = None
if self.use_token_type_ids:
lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : Union[str, Any] = None
lowercase : Dict = None
lowercase : List[Any] = None
if self.use_labels:
lowercase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : int = ids_tensor([self.batch_size] , self.num_choices )
lowercase : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[int] = AlbertModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowercase : Dict = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
lowercase : str = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = AlbertForPreTraining(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowercase : List[Any] = model(
SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , sentence_order_label=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[Any] = AlbertForMaskedLM(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowercase : Optional[Any] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = AlbertForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowercase : int = model(
SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[Any] = self.num_labels
lowercase : List[str] = AlbertForSequenceClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowercase : Any = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = self.num_labels
lowercase : Optional[Any] = AlbertForTokenClassification(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowercase : int = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Union[str, Any] = self.num_choices
lowercase : Tuple = AlbertForMultipleChoice(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowercase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : str = model(
SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self ):
lowercase : Optional[int] = self.prepare_config_and_inputs()
( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ) = config_and_inputs
lowercase : Any = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
A : List[str] = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
A : Dict = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
A : Union[str, Any] = True
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
lowercase : List[str] = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ )
return inputs_dict
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = AlbertModelTester(self )
lowercase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase : List[Any] = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
@slow
def __lowerCamelCase ( self ):
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Optional[Any] = AlbertModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def __lowerCamelCase ( self ):
lowercase : Optional[int] = AlbertModel.from_pretrained('''albert-base-v2''' )
lowercase : int = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowercase : List[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase : Any = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )[0]
lowercase : Dict = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
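# Tiny-config smoke test sketch (no download; the sizes loosely mirror the
# tester defaults above and are otherwise arbitrary):
def _tiny_albert_sketch():
    import torch
    from transformers import AlbertConfig, AlbertForMaskedLM
    config = AlbertConfig(
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
    )
    model = AlbertForMaskedLM(config).eval()
    input_ids = torch.randint(0, config.vocab_size, (1, 7))
    with torch.no_grad():
        logits = model(input_ids).logits
    assert logits.shape == (1, 7, config.vocab_size)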
| 337 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def __lowercase ( _UpperCamelCase = 8 ) ->str:
"""simple docstring"""
lowercase : List[str] = ascii_letters + digits + punctuation
return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) )
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->str:
"""simple docstring"""
i -= len(_UpperCamelCase )
lowercase : Dict = i // 3
lowercase : List[str] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
lowercase : Union[str, Any] = (
chars_incl
+ random(_UpperCamelCase, quotient + remainder )
+ random(_UpperCamelCase, _UpperCamelCase )
+ random(_UpperCamelCase, _UpperCamelCase )
)
lowercase : Union[str, Any] = list(_UpperCamelCase )
shuffle(_UpperCamelCase )
return "".join(_UpperCamelCase )
# random is a generalised function for letters, characters and numbers
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->str:
"""simple docstring"""
return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) )
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Dict:
"""simple docstring"""
pass # Put your code here...
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
pass # Put your code here...
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->List[Any]:
"""simple docstring"""
pass # Put your code here...
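# Possible fill-ins for the three "Put your code here..." stubs above, each one
# just the general `random(...)` helper specialized to a single character class.
# This is a sketch with simplified one-argument signatures; the originals are
# deliberately left blank as an exercise.
def random_letters_example(i: int) -> str:
    return "".join(secrets.choice(ascii_letters) for _ in range(i))
def random_number_example(i: int) -> str:
    return "".join(secrets.choice(digits) for _ in range(i))
def random_characters_example(i: int) -> str:
    return "".join(secrets.choice(punctuation) for _ in range(i))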
def __lowercase ( _UpperCamelCase, _UpperCamelCase = 8 ) ->bool:
"""simple docstring"""
if len(_UpperCamelCase ) < min_length:
# Your Password must be at least 8 characters long
return False
lowercase : str = any(char in ascii_uppercase for char in password )
lowercase : List[str] = any(char in ascii_lowercase for char in password )
lowercase : Dict = any(char in digits for char in password )
lowercase : Tuple = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def __lowercase ( ) ->Dict:
"""simple docstring"""
lowercase : Union[str, Any] = int(input('''Please indicate the max length of your password: ''' ).strip() )
lowercase : Optional[Any] = input(
'''Please indicate the characters that must be in your password: ''' ).strip()
print('''Password generated:''', password_generator(_UpperCamelCase ) )
print(
'''Alternative Password generated:''', alternative_password_generator(_UpperCamelCase, _UpperCamelCase ), )
print('''[If you are thinking of using this password, you had better save it.]''' )
if __name__ == "__main__":
main()
| 337 | 1 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__a = logging.get_logger(__name__)
# General docstring
__a = '''RegNetConfig'''
# Base docstring
__a = '''facebook/regnet-y-040'''
__a = [1, 10_88, 7, 7]
# Image classification docstring
__a = '''facebook/regnet-y-040'''
__a = '''tabby, tabby cat'''
__a = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ):
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 3 , SCREAMING_SNAKE_CASE__ = 1 , SCREAMING_SNAKE_CASE__ = 1 , SCREAMING_SNAKE_CASE__ = "relu" , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
# The padding and conv have been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowercase : Optional[int] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowercase : Any = tf.keras.layers.ConvaD(
filters=SCREAMING_SNAKE_CASE__ , kernel_size=SCREAMING_SNAKE_CASE__ , strides=SCREAMING_SNAKE_CASE__ , padding='''VALID''' , groups=SCREAMING_SNAKE_CASE__ , use_bias=SCREAMING_SNAKE_CASE__ , name='''convolution''' , )
lowercase : List[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='''normalization''' )
lowercase : Optional[int] = ACTaFN[activation] if activation is not None else tf.identity
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = self.convolution(self.padding(SCREAMING_SNAKE_CASE__ ) )
lowercase : Optional[Any] = self.normalization(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = self.activation(SCREAMING_SNAKE_CASE__ )
return hidden_state
class __SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ):
def __init__( self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = config.num_channels
lowercase : Union[str, Any] = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = shape_list(SCREAMING_SNAKE_CASE__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values matches the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowercase : List[str] = tf.transpose(SCREAMING_SNAKE_CASE__ , perm=(0, 2, 3, 1) )
lowercase : int = self.embedder(SCREAMING_SNAKE_CASE__ )
return hidden_state
class __SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ):
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 2 , **SCREAMING_SNAKE_CASE__ ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = tf.keras.layers.ConvaD(
filters=SCREAMING_SNAKE_CASE__ , kernel_size=1 , strides=SCREAMING_SNAKE_CASE__ , use_bias=SCREAMING_SNAKE_CASE__ , name='''convolution''' )
lowercase : Optional[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='''normalization''' )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ):
return self.normalization(self.convolution(SCREAMING_SNAKE_CASE__ ) , training=SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ):
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE__ , name='''pooler''' )
lowercase : Optional[Any] = [
tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
lowercase : Optional[Any] = self.pooler(SCREAMING_SNAKE_CASE__ )
for layer_module in self.attention:
lowercase : Any = layer_module(SCREAMING_SNAKE_CASE__ )
lowercase : Dict = hidden_state * pooled
return hidden_state
class __SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ):
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 1 , **SCREAMING_SNAKE_CASE__ ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = in_channels != out_channels or stride != 1
lowercase : Optional[Any] = max(1 , out_channels // config.groups_width )
lowercase : List[str] = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowercase : Union[str, Any] = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , groups=SCREAMING_SNAKE_CASE__ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE__ , name='''layer.2''' ),
]
lowercase : Any = ACTaFN[config.hidden_act]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Union[str, Any] = hidden_state
for layer_module in self.layers:
lowercase : Optional[Any] = layer_module(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = self.shortcut(SCREAMING_SNAKE_CASE__ )
hidden_state += residual
lowercase : Tuple = self.activation(SCREAMING_SNAKE_CASE__ )
return hidden_state
class __SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ):
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 1 , **SCREAMING_SNAKE_CASE__ ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : str = in_channels != out_channels or stride != 1
lowercase : int = max(1 , out_channels // config.groups_width )
lowercase : Dict = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
lowercase : Optional[int] = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , groups=SCREAMING_SNAKE_CASE__ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(SCREAMING_SNAKE_CASE__ , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE__ , name='''layer.3''' ),
]
lowercase : Any = ACTaFN[config.hidden_act]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = hidden_state
for layer_module in self.layers:
lowercase : Union[str, Any] = layer_module(SCREAMING_SNAKE_CASE__ )
lowercase : str = self.shortcut(SCREAMING_SNAKE_CASE__ )
hidden_state += residual
lowercase : Union[str, Any] = self.activation(SCREAMING_SNAKE_CASE__ )
return hidden_state
class __SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ):
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 2 , SCREAMING_SNAKE_CASE__ = 2 , **SCREAMING_SNAKE_CASE__ ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
lowercase : int = [
# downsampling is done in the first layer with stride of 2
layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , name='''layers.0''' ),
*[layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
for layer_module in self.layers:
lowercase : Optional[Any] = layer_module(SCREAMING_SNAKE_CASE__ )
return hidden_state
class __SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ):
def __init__( self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
SCREAMING_SNAKE_CASE__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
lowercase : Optional[int] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(SCREAMING_SNAKE_CASE__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , depth=SCREAMING_SNAKE_CASE__ , name=f"""stages.{i+1}""" ) )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = True ):
lowercase : Union[str, Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase : List[str] = hidden_states + (hidden_state,)
lowercase : List[str] = stage_module(SCREAMING_SNAKE_CASE__ )
if output_hidden_states:
lowercase : Optional[int] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=SCREAMING_SNAKE_CASE__ )
@keras_serializable
class __SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ):
A : List[Any] = RegNetConfig
def __init__( self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : str = config
lowercase : Any = TFRegNetEmbeddings(SCREAMING_SNAKE_CASE__ , name='''embedder''' )
lowercase : Optional[Any] = TFRegNetEncoder(SCREAMING_SNAKE_CASE__ , name='''encoder''' )
lowercase : Dict = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE__ , name='''pooler''' )
@unpack_inputs
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , ):
lowercase : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase : Any = return_dict if return_dict is not None else self.config.use_return_dict
lowercase : Union[str, Any] = self.embedder(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = self.encoder(
SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = encoder_outputs[0]
lowercase : Tuple = self.pooler(SCREAMING_SNAKE_CASE__ )
# Change to NCHW output format to have uniformity in the modules
lowercase : str = tf.transpose(SCREAMING_SNAKE_CASE__ , perm=(0, 3, 1, 2) )
lowercase : str = tf.transpose(SCREAMING_SNAKE_CASE__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowercase : Union[str, Any] = tuple([tf.transpose(SCREAMING_SNAKE_CASE__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE__ , pooler_output=SCREAMING_SNAKE_CASE__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Optional[int] = RegNetConfig
A : int = 'regnet'
A : Union[str, Any] = 'pixel_values'
@property
def __lowerCamelCase ( self ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
__a = r'''
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matters related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
__a = r'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , A__ , )
class __SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
super().__init__(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowercase : str = TFRegNetMainLayer(SCREAMING_SNAKE_CASE__ , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__=False , ):
lowercase : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase : Any = return_dict if return_dict is not None else self.config.use_return_dict
lowercase : Union[str, Any] = self.regnet(
pixel_values=SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , A__ , )
class __SCREAMING_SNAKE_CASE ( A__ , A__ ):
def __init__( self , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
super().__init__(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = config.num_labels
lowercase : Tuple = TFRegNetMainLayer(SCREAMING_SNAKE_CASE__ , name='''regnet''' )
# classification head
lowercase : List[str] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__=False , ):
lowercase : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
lowercase : Dict = self.regnet(
SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
lowercase : str = outputs.pooler_output if return_dict else outputs[1]
lowercase : List[str] = self.classifier[0](SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = self.classifier[1](SCREAMING_SNAKE_CASE__ )
lowercase : Dict = None if labels is None else self.hf_compute_loss(labels=SCREAMING_SNAKE_CASE__ , logits=SCREAMING_SNAKE_CASE__ )
if not return_dict:
lowercase : List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=SCREAMING_SNAKE_CASE__ , logits=SCREAMING_SNAKE_CASE__ , hidden_states=outputs.hidden_states )
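# End-to-end usage sketch via the upstream classes this file mirrors (an
# assumption; requires `transformers`, `tensorflow`, Pillow and network access
# to fetch the facebook/regnet-y-040 weights plus one COCO image):
def _regnet_classification_sketch():
    import requests
    import tensorflow as tf
    from PIL import Image
    from transformers import AutoImageProcessor, TFRegNetForImageClassification
    image = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''', stream=True).raw)
    processor = AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''')
    model = TFRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''')
    logits = model(**processor(image, return_tensors='''tf''')).logits
    print(model.config.id2label[int(tf.math.argmax(logits, axis=-1))])  # e.g. "tabby, tabby cat"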
| 337 |
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """
    Returns True if it is safe to place a queen at board[row][column]: no queen
    in the same row, the same column, or either upper diagonal attacks the square.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Places queens row by row, backtracking whenever no column of `row` is safe."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Prints the board with 'Q' for queens and '.' for empty squares."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
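# A lightweight sanity check appended as an illustration (not in the original
# script): the classic eight-queens puzzle has exactly 92 distinct solutions.
assert len(solution) == 92, f"expected 92 solutions for n=8, found {len(solution)}"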
| 337 | 1 |
from importlib import import_module
from .logging import get_logger
__a = get_logger(__name__)
class _PatchedModuleObj:
    """Set all the modules components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """
    Patch a submodule attribute of an object, keeping all other submodules intact at all levels.
    """

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
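# A hypothetical usage sketch (the module and replacement below are assumptions,
# not part of the original file). The patcher temporarily swaps an attribute
# reachable from a module and restores it on exit:
#
#     import my_module                                # does `from os.path import join`
#     with patch_submodule(my_module, "os.path.join", fake_join):
#         my_module.build_path("a", "b")              # sees fake_join
#     # os.path.join (and any renamed alias) is restored here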
| 337 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_ctrl'''] = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_ctrl'''] = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
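# Usage note (a sketch, not part of this file): with the _LazyModule indirection
# above, importing the package stays cheap; the heavy `modeling_ctrl` submodule is
# only loaded the first time one of its exported names, e.g. CTRLModel, is accessed.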
| 337 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="replace" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowercase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
lowercase : List[Any] = getattr(SCREAMING_SNAKE_CASE__ , pre_tok_state.pop('''type''' ) )
lowercase : str = add_prefix_space
lowercase : List[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = add_prefix_space
lowercase : str = '''post_processor'''
lowercase : str = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if tokenizer_component_instance:
lowercase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase : Tuple = tuple(state['''sep'''] )
if "cls" in state:
lowercase : Union[str, Any] = tuple(state['''cls'''] )
lowercase : Optional[int] = False
if state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
lowercase : Any = add_prefix_space
lowercase : Tuple = True
if state.get('''trim_offsets''' , SCREAMING_SNAKE_CASE__ ) != trim_offsets:
lowercase : List[str] = trim_offsets
lowercase : Optional[int] = True
if changes_to_apply:
lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ , state.pop('''type''' ) )
lowercase : Union[str, Any] = component_class(**SCREAMING_SNAKE_CASE__ )
setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __lowerCamelCase ( self ):
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else value
lowercase : Any = value
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : Any = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : int = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : Tuple = [self.sep_token_id]
lowercase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
return token_ids_a + [self.eos_token_id]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = ''' '''.join(SCREAMING_SNAKE_CASE__ )
lowercase : Any = self.encode(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > self.model_max_length:
lowercase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
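# A usage sketch (assumes the class above is BlenderbotTokenizerFast, its name in
# the released library):
#
#     tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#     ids = tokenizer(" Hello world")["input_ids"]
#     # the sequence ends with eos_token_id, per the build-inputs method above
#     # that returns token_ids + [self.eos_token_id]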
| 337 |
from collections.abc import Callable
class Heap:
    """
    A generic Heap class; it can act as a min or a max heap depending on the key
    function (the default identity key yields a max-heap on the item value).
    """

    def __init__(self, key: Callable = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int):
        """Returns the parent index of the given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int):
        """Returns the left-child index of the given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int):
        """Returns the right-child index of the given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs the changes required for a swap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using the default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """
        Returns the index of the valid parent as per the desired ordering among
        the given index and both of its children.
        """
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction of the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction of the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value) -> None:
        """Updates the given item's value if the item is present in the heap."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item) -> None:
        """Deletes the given item from the heap if it is present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        """Returns the top item from the heap if present, else None."""
        return self.arr[0] if self.size else None

    def extract_top(self):
        """Returns the top item and removes it from the heap if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def __lowercase ( ) ->None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
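# A minimal usage sketch (not part of the original file; relies on the method
# names reconstructed above). The default identity key yields a max-heap:
#
#     heap = Heap()
#     heap.insert_item("a", 3)
#     heap.insert_item("b", 14)
#     heap.get_top()                  # -> ["b", 14]
#     heap = Heap(key=lambda v: -v)   # a negating key turns it into a min-heap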
| 337 | 1 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    """Returns x unchanged if it is already iterable, else the pair (x, x)."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class __SCREAMING_SNAKE_CASE :
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
pass
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = VisionTextDualEncoderConfig.from_vision_text_configs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = TFVisionTextDualEncoderModel(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = model(input_ids=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ ):
lowercase , lowercase : Dict = self.get_vision_text_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = TFVisionTextDualEncoderModel(vision_model=SCREAMING_SNAKE_CASE__ , text_model=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = model(input_ids=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ ):
lowercase , lowercase : Dict = self.get_vision_text_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = {'''vision_model''': vision_model, '''text_model''': text_model}
lowercase : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = model(input_ids=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ ):
lowercase , lowercase : List[Any] = self.get_vision_text_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : str = TFVisionTextDualEncoderModel(vision_model=SCREAMING_SNAKE_CASE__ , text_model=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = model(
input_ids=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , output_attentions=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = output.vision_model_output.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase : List[Any] = to_atuple(vision_model.config.image_size )
lowercase : Union[str, Any] = to_atuple(vision_model.config.patch_size )
lowercase : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowercase : int = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowercase : str = output.text_model_output.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = np.abs((a - b) ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Dict = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : int = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : int = self.prepare_config_and_inputs()
self.check_save_load(**SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Dict = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**SCREAMING_SNAKE_CASE__ )
    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
def __lowerCamelCase ( self ):
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
lowercase : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-deit-tf''' , '''hf-internal-testing/tiny-random-roberta''' )
lowercase : Tuple = 13
lowercase : List[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowercase : str = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowercase : Optional[Any] = random_attention_mask([batch_size, 4] )
lowercase : Optional[int] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ ):
lowercase , lowercase : Any = self.get_vision_text_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = TFVisionTextDualEncoderModel(vision_model=SCREAMING_SNAKE_CASE__ , text_model=SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = model(
input_ids=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , output_attentions=SCREAMING_SNAKE_CASE__ )
lowercase : int = output.vision_model_output.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowercase : List[str] = to_atuple(vision_model.config.image_size )
lowercase : Optional[Any] = to_atuple(vision_model.config.patch_size )
lowercase : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowercase : Tuple = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowercase : List[Any] = output.text_model_output.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-clip-tf''' , '''hf-internal-testing/tiny-random-bert''' )
lowercase : List[Any] = 13
lowercase : Tuple = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowercase : List[str] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowercase : int = random_attention_mask([batch_size, 4] )
lowercase : Optional[Any] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def __lowerCamelCase ( self ):
lowercase : Optional[int] = TFVisionTextDualEncoderModel.from_pretrained(
'''clip-italian/clip-italian''' , logit_scale_init_value=1.0 , from_pt=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
lowercase : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowercase : Optional[int] = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='''np''' )
lowercase : Optional[int] = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
lowercase : str = np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
| 337 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 337 | 1 |
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(self, vocab_size=50358, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=2, eos_token_id=1, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
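# A usage sketch (not part of the original file; the claim about the checkpoint
# family is an assumption): the defaults above target the large encoder sizes, so
# a smaller configuration is built by overriding:
#
#     config = BertGenerationConfig(hidden_size=512, num_hidden_layers=4)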
| 337 |
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sorts the list in place by sweeping alternately right-to-left and left-to-right.
    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11])
    [-4, 0, 1, 2, 5, 11]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # sweep right to left, sinking small items
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        # sweep left to right, floating large items
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(F'''{cocktail_shaker_sort(unsorted) = }''')
| 337 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
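# Example invocation (flag names follow the benchmark argument dataclass; treat
# this as a sketch rather than the documented CLI):
#
#     python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128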
| 337 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
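# A usage sketch (the backbone identifier is an assumption):
#
#     config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))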
| 337 |
def bfs(graph, source, sink, parent) -> bool:
    """Returns True if an augmenting path from source to sink exists, filling `parent` along the way."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink) -> int:
    """Computes the maximum flow from source to sink using BFS augmenting paths (Edmonds-Karp)."""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
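# For this classic example network (CLRS figure 26.1), the program prints 23,
# the value of the maximum flow from vertex 0 to vertex 5.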
| 337 | 1 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 337 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists and is handled."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
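# A hypothetical usage sketch (the menu class and key names are assumptions):
#
#     class Menu(metaclass=KeyHandler):
#         @mark("up")
#         def move_up(cls):
#             ...
#
#     Menu.handle_input()   # reads one key and dispatches to the marked handler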
| 337 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_speech_encoder_decoder'''] = ['''SpeechEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_speech_encoder_decoder'''] = ['''FlaxSpeechEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 337 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
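# Example invocation (the script and checkpoint names are placeholders, not
# confirmed by this file):
#
#     python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#         --longformer_model longformer-base-4096 \
#         --longformer_question_answering_ckpt_path path/to/checkpoint.ckpt \
#         --pytorch_dump_folder_path ./longformer-base-4096-qa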
| 337 | 1 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class __SCREAMING_SNAKE_CASE ( A__ ):
A : BigBirdConfig
A : jnp.dtype = jnp.floataa
A : bool = True
def __lowerCamelCase ( self ):
super().setup()
lowercase : int = nn.Dense(5 , dtype=self.dtype )
def __call__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = super().__call__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowercase : Dict = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Dict = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class __SCREAMING_SNAKE_CASE :
A : str = "google/bigbird-roberta-base"
A : int = 3000
A : int = 1_0500
A : int = 128
A : int = 3
A : int = 1
A : int = 5
# tx_args
A : float = 3E-5
A : float = 0.0
A : int = 2_0000
A : float = 0.0095
A : str = "bigbird-roberta-natural-questions"
A : str = "training-expt"
A : str = "data/nq-training.jsonl"
A : str = "data/nq-validation.jsonl"
def __lowerCamelCase ( self ):
os.makedirs(self.base_dir , exist_ok=SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = os.path.join(self.base_dir , self.save_dir )
lowercase : Dict = self.batch_size_per_device * jax.device_count()
@dataclass
class __SCREAMING_SNAKE_CASE :
    pad_id : int
    max_length : int = 4096 # no dynamic padding on TPUs
    def __call__( self , batch ):
        batch = self.collate_fn(batch )
        batch = jax.tree_util.tree_map(shard , batch )
        return batch
    def collate_fn( self , features ):
        input_ids , attention_mask = self.fetch_inputs(features['''input_ids'''] )
        batch = {
            '''input_ids''': jnp.array(input_ids , dtype=jnp.int32 ),
            '''attention_mask''': jnp.array(attention_mask , dtype=jnp.int32 ),
            '''start_labels''': jnp.array(features['''start_token'''] , dtype=jnp.int32 ),
            '''end_labels''': jnp.array(features['''end_token'''] , dtype=jnp.int32 ),
            '''pooled_labels''': jnp.array(features['''category'''] , dtype=jnp.int32 ),
        }
        return batch
    def fetch_inputs( self , input_ids ):
        inputs = [self._fetch_inputs(ids ) for ids in input_ids]
        return zip(*inputs )
    def _fetch_inputs( self , input_ids ):
        attention_mask = [1 for _ in range(len(input_ids ) )]
        while len(input_ids ) < self.max_length:
            input_ids.append(self.pad_id )
            attention_mask.append(0 )
        return input_ids , attention_mask
def get_batched_dataset(dataset, batch_size, seed=None ):
    """simple docstring"""
    if seed is not None:
        dataset = dataset.shuffle(seed=seed )
    for i in range(len(dataset ) // batch_size ):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch )
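# Note: this generator yields exactly len(dataset) // batch_size full batches, so any
# trailing partial batch is silently dropped; shuffling happens once per call when a
# seed is supplied.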
@partial(jax.pmap, axis_name='''batch''' )
def train_step(state, drp_rng, **model_inputs ):
    """simple docstring"""
    def loss_fn(params ):
        start_labels = model_inputs.pop('''start_labels''' )
        end_labels = model_inputs.pop('''end_labels''' )
        pooled_labels = model_inputs.pop('''pooled_labels''' )
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True )
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels, )
    drp_rng, new_drp_rng = jax.random.split(drp_rng )
    grad_fn = jax.value_and_grad(loss_fn )
    loss, grads = grad_fn(state.params )
    metrics = jax.lax.pmean({'''loss''': loss}, axis_name='''batch''' )
    grads = jax.lax.pmean(grads, '''batch''' )
    state = state.apply_gradients(grads=grads )
    return state, metrics, new_drp_rng
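# train_step runs under jax.pmap: the dropout RNG is split once per call, and both the
# scalar loss and the gradients are averaged across devices with jax.lax.pmean before
# apply_gradients, which keeps the replicated parameters in sync.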
@partial(jax.pmap, axis_name='''batch''' )
def val_step(state, **model_inputs ):
    """simple docstring"""
    start_labels = model_inputs.pop('''start_labels''' )
    end_labels = model_inputs.pop('''end_labels''' )
    pooled_labels = model_inputs.pop('''pooled_labels''' )
    outputs = state.apply_fn(**model_inputs, params=state.params, train=False )
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels )
    metrics = jax.lax.pmean({'''loss''': loss}, axis_name='''batch''' )
    return metrics
class TrainState( train_state.TrainState ):
    loss_fn : Callable = struct.field(pytree_node=False )
@dataclass
class __SCREAMING_SNAKE_CASE :
A : Args
A : Callable
A : Callable
A : Callable
A : Callable
A : wandb
A : Callable = None
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
lowercase : List[Any] = model.params
lowercase : List[str] = TrainState.create(
apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE__ , tx=SCREAMING_SNAKE_CASE__ , loss_fn=SCREAMING_SNAKE_CASE__ , )
if ckpt_dir is not None:
lowercase , lowercase , lowercase , lowercase , lowercase : List[str] = restore_checkpoint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = {
'''lr''': args.lr,
'''init_lr''': args.init_lr,
'''warmup_steps''': args.warmup_steps,
'''num_train_steps''': num_train_steps,
'''weight_decay''': args.weight_decay,
}
lowercase , lowercase : Optional[int] = build_tx(**SCREAMING_SNAKE_CASE__ )
lowercase : Any = train_state.TrainState(
step=SCREAMING_SNAKE_CASE__ , apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE__ , tx=SCREAMING_SNAKE_CASE__ , opt_state=SCREAMING_SNAKE_CASE__ , )
lowercase : int = args
lowercase : List[Any] = data_collator
lowercase : Dict = lr
lowercase : Union[str, Any] = params
lowercase : int = jax_utils.replicate(SCREAMING_SNAKE_CASE__ )
return state
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : int = self.args
lowercase : int = len(SCREAMING_SNAKE_CASE__ ) // args.batch_size
lowercase : List[str] = jax.random.PRNGKey(0 )
lowercase : Tuple = jax.random.split(SCREAMING_SNAKE_CASE__ , jax.device_count() )
for epoch in range(args.max_epochs ):
            lowercase : List[str] = jnp.array(0 , dtype=jnp.float32 )
lowercase : Optional[Any] = get_batched_dataset(SCREAMING_SNAKE_CASE__ , args.batch_size , seed=SCREAMING_SNAKE_CASE__ )
lowercase : int = 0
for batch in tqdm(SCREAMING_SNAKE_CASE__ , total=SCREAMING_SNAKE_CASE__ , desc=f"""Running EPOCH-{epoch}""" ):
lowercase : str = self.data_collator(SCREAMING_SNAKE_CASE__ )
lowercase , lowercase , lowercase : int = self.train_step_fn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
if i % args.logging_steps == 0:
lowercase : Optional[int] = jax_utils.unreplicate(state.step )
lowercase : List[Any] = running_loss.item() / i
lowercase : Optional[int] = self.scheduler_fn(state_step - 1 )
lowercase : List[str] = self.evaluate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = {
'''step''': state_step.item(),
'''eval_loss''': eval_loss.item(),
'''tr_loss''': tr_loss,
'''lr''': lr.item(),
}
tqdm.write(str(SCREAMING_SNAKE_CASE__ ) )
self.logger.log(SCREAMING_SNAKE_CASE__ , commit=SCREAMING_SNAKE_CASE__ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f"""-e{epoch}-s{i}""" , state=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = get_batched_dataset(SCREAMING_SNAKE_CASE__ , self.args.batch_size )
lowercase : Union[str, Any] = len(SCREAMING_SNAKE_CASE__ ) // self.args.batch_size
        lowercase : int = jnp.array(0 , dtype=jnp.float32 )
lowercase : List[str] = 0
for batch in tqdm(SCREAMING_SNAKE_CASE__ , total=SCREAMING_SNAKE_CASE__ , desc='''Evaluating ... ''' ):
lowercase : str = self.data_collator(SCREAMING_SNAKE_CASE__ )
lowercase : str = self.val_step_fn(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
return running_loss / i
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[Any] = jax_utils.unreplicate(SCREAMING_SNAKE_CASE__ )
print(f"""SAVING CHECKPOINT IN {save_dir}""" , end=''' ... ''' )
self.model_save_fn(SCREAMING_SNAKE_CASE__ , params=state.params )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , '''opt_state.msgpack''' ) , '''wb''' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(SCREAMING_SNAKE_CASE__ , '''args.joblib''' ) )
joblib.dump(self.data_collator , os.path.join(SCREAMING_SNAKE_CASE__ , '''data_collator.joblib''' ) )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , '''training_state.json''' ) , '''w''' ) as f:
json.dump({'''step''': state.step.item()} , SCREAMING_SNAKE_CASE__ )
print('''DONE''' )
def restore_checkpoint(save_dir, state ):
    """simple docstring"""
    print(f"""RESTORING CHECKPOINT FROM {save_dir}""", end=''' ... ''' )
    with open(os.path.join(save_dir, '''flax_model.msgpack''' ), '''rb''' ) as f:
        params = from_bytes(state.params, f.read() )
    with open(os.path.join(save_dir, '''opt_state.msgpack''' ), '''rb''' ) as f:
        opt_state = from_bytes(state.opt_state, f.read() )
    args = joblib.load(os.path.join(save_dir, '''args.joblib''' ) )
    data_collator = joblib.load(os.path.join(save_dir, '''data_collator.joblib''' ) )
    with open(os.path.join(save_dir, '''training_state.json''' ), '''r''' ) as f:
        training_state = json.load(f )
    step = training_state['''step''']
    print('''DONE''' )
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps ):
    """simple docstring"""
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps )
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps )
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps] )
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay ):
    """simple docstring"""
    def weight_decay_mask(params ):
        params = traverse_util.flatten_dict(params )
        mask = {k: (k[-1] != '''bias''' and k[-2:] != ('''LayerNorm''', '''scale''')) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask )
    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps )
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask )
    return tx, lr
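# Hedged usage sketch (the values below are illustrative defaults, not tuned settings):
#   tx, lr_schedule = build_tx(lr=3e-5, init_lr=0.0, warmup_steps=20_000,
#                              num_train_steps=100_000, weight_decay=0.0095)
# The schedule warms up linearly from init_lr to lr, then decays linearly towards 1e-7,
# and the AdamW mask above exempts biases and LayerNorm scales from weight decay.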
| 337 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Any = 'yolos'
def __init__( self , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=[512, 864] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=100 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = hidden_size
lowercase : int = num_hidden_layers
lowercase : str = num_attention_heads
lowercase : str = intermediate_size
lowercase : Dict = hidden_act
lowercase : int = hidden_dropout_prob
lowercase : Optional[Any] = attention_probs_dropout_prob
lowercase : List[Any] = initializer_range
lowercase : Optional[int] = layer_norm_eps
lowercase : str = image_size
lowercase : Dict = patch_size
lowercase : str = num_channels
lowercase : Optional[int] = qkv_bias
lowercase : List[str] = num_detection_tokens
lowercase : List[str] = use_mid_position_embeddings
lowercase : Dict = auxiliary_loss
# Hungarian matcher
lowercase : Optional[Any] = class_cost
lowercase : Any = bbox_cost
lowercase : int = giou_cost
# Loss coefficients
lowercase : Dict = bbox_loss_coefficient
lowercase : Optional[Any] = giou_loss_coefficient
lowercase : Tuple = eos_coefficient
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = version.parse('1.11' )
@property
def __lowerCamelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCamelCase ( self ):
return 1E-4
@property
def __lowerCamelCase ( self ):
return 12
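    # For ONNX export this config declares a single input, pixel_values, with dynamic
    # (batch, num_channels, height, width) axes, a validation tolerance of 1e-4, and
    # 12 as the default opset.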
| 337 | 1 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class __SCREAMING_SNAKE_CASE :
A : Tuple = PegasusConfig
A : Tuple = {}
A : Optional[int] = 'gelu'
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=99 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=40 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=0 , ):
lowercase : List[Any] = parent
lowercase : Dict = batch_size
lowercase : Any = seq_length
lowercase : List[Any] = is_training
lowercase : int = use_labels
lowercase : Optional[Any] = vocab_size
lowercase : Union[str, Any] = hidden_size
lowercase : Any = num_hidden_layers
lowercase : int = num_attention_heads
lowercase : Any = intermediate_size
lowercase : List[str] = hidden_dropout_prob
lowercase : Optional[Any] = attention_probs_dropout_prob
lowercase : Optional[Any] = max_position_embeddings
lowercase : Any = eos_token_id
lowercase : Optional[int] = pad_token_id
lowercase : List[Any] = bos_token_id
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowercase : List[str] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowercase : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowercase : List[str] = prepare_pegasus_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return config, inputs_dict
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = TFPegasusModel(config=SCREAMING_SNAKE_CASE__ ).get_decoder()
lowercase : int = inputs_dict['''input_ids''']
lowercase : Any = input_ids[:1, :]
lowercase : Union[str, Any] = inputs_dict['''attention_mask'''][:1, :]
lowercase : Tuple = inputs_dict['''head_mask''']
lowercase : Optional[int] = 1
# first forward pass
lowercase : Optional[Any] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ )
lowercase , lowercase : str = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowercase : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowercase : Dict = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
lowercase : Union[str, Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
lowercase : str = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowercase : Optional[Any] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )[0]
lowercase : Dict = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowercase : Union[str, Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowercase : List[str] = output_from_no_past[:, -3:, random_slice_idx]
lowercase : List[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , rtol=1E-3 )
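    # The check above exercises the decoder cache: one forward pass without cache over
    # the extended sequence vs. a cached pass over only the three new tokens, compared
    # on a random slice of the logits with rtol=1e-3.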
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=None, _UpperCamelCase=None, _UpperCamelCase=None, _UpperCamelCase=None, _UpperCamelCase=None, ) ->List[Any]:
"""simple docstring"""
if attention_mask is None:
        lowercase : Optional[Any] = tf.cast(tf.math.not_equal(_UpperCamelCase, config.pad_token_id ), tf.int8 )
if decoder_attention_mask is None:
lowercase : Union[str, Any] = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.int8 ),
], axis=-1, )
if head_mask is None:
lowercase : int = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowercase : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowercase : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
A : Dict = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
A : Tuple = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
A : Tuple = (
{
'conversational': TFPegasusForConditionalGeneration,
'feature-extraction': TFPegasusModel,
'summarization': TFPegasusForConditionalGeneration,
'text2text-generation': TFPegasusForConditionalGeneration,
'translation': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
A : str = True
A : Dict = False
A : str = False
def __lowerCamelCase ( self ):
lowercase : Tuple = TFPegasusModelTester(self )
lowercase : Tuple = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
A : Union[str, Any] = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
A : str = [
'California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'
' reduce the risk of wildfires.',
'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
A : List[str] = 'google/pegasus-xsum'
@cached_property
def __lowerCamelCase ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __lowerCamelCase ( self ):
        lowercase : Union[str, Any] = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
return model
def __lowerCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = self.translate_src_text(**SCREAMING_SNAKE_CASE__ )
assert self.expected_text == generated_words
def __lowerCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
lowercase : Union[str, Any] = self.tokenizer(self.src_text , **SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='''tf''' )
lowercase : Tuple = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=SCREAMING_SNAKE_CASE__ , )
lowercase : List[str] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
return generated_words
@slow
def __lowerCamelCase ( self ):
self._assert_generated_batch_equal_expected()
| 337 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint ):
    """simple docstring"""
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
            f""" reinstalling {pkg}.""" )
    if not ops[op](version.parse(got_ver ), version.parse(want_ver ) ):
        raise ImportError(
            f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def require_version(requirement, hint = None ) ->None:
    """simple docstring"""
    hint = f"""\n{hint}""" if hint is not None else ''''''
    # non-versioned check
    if re.match(R'''^[\w_\-\d]+$''', requirement ):
        pkg , op , want_ver = requirement, None, None
    else:
        match = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''', requirement )
        if not match:
            raise ValueError(
                '''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but'''
                f""" got {requirement}""" )
        pkg , want_full = match[0]
        want_range = want_full.split(''',''' ) # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(R'''^([\s!=<>]{1,2})(.+)''', w )
            if not match:
                raise ValueError(
                    '''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,'''
                    f""" but got {requirement}""" )
            op , want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
    # special case
    if pkg == "python":
        got_ver = '''.'''.join([str(x ) for x in sys.version_info[:3]] )
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint )
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg )
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint )
def require_version_core(requirement ):
    """simple docstring"""
    hint = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
    return require_version(requirement, hint )
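# Illustrative requirement strings accepted by require_version (a sketch, not an
# exhaustive list):
#   require_version("numpy")                  # presence-only check
#   require_version("tokenizers>=0.11.1")     # single version clause
#   require_version("python>=3.8,<3.12")      # special-cased python, multiple clauses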
| 337 | 1 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
    """simple docstring"""
    play, oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands = 100 ):
    """simple docstring"""
    return (generate_random_hand() for _ in range(number_of_hands ))
@pytest.mark.parametrize('''hand, expected''', TEST_FLUSH )
def test_flush(hand, expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize('''hand, expected''', TEST_STRAIGHT )
def test_straight(hand, expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize('''hand, expected, card_values''', TEST_FIVE_HIGH_STRAIGHT )
def test_five_high_straight(hand, expected, card_values ):
    """simple docstring"""
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize('''hand, expected''', TEST_KIND )
def test_same_kind(hand, expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize('''hand, expected''', TEST_TYPES )
def test_hand_type(hand, expected ):
    """simple docstring"""
    assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize('''hand, other, expected''', TEST_COMPARE )
def test_compare_simple(hand, other, expected ):
    """simple docstring"""
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize('''hand, other, expected''', generate_random_hands() )
def test_compare_random(hand, other, expected ):
    """simple docstring"""
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def test_hand_sorted():
    """simple docstring"""
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    """simple docstring"""
    pokerhands = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    """simple docstring"""
    pokerhand = PokerHand('''2C 4S AS 3D 5C''' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    """simple docstring"""
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_path = os.path.join(script_dir, '''poker_hands.txt''' )
    with open(poker_hands_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
    assert answer == 376
| 337 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
__a = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = 'deta'
A : Dict = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=900 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=1024 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="relu" , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__="sine" , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=300 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.25 , **SCREAMING_SNAKE_CASE__ , ):
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowercase : Tuple = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
else:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = backbone_config.pop('''model_type''' )
lowercase : Any = CONFIG_MAPPING[backbone_model_type]
lowercase : List[Any] = config_class.from_dict(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = backbone_config
lowercase : Union[str, Any] = num_queries
lowercase : Any = max_position_embeddings
lowercase : int = d_model
lowercase : Any = encoder_ffn_dim
lowercase : Optional[int] = encoder_layers
lowercase : Tuple = encoder_attention_heads
lowercase : Optional[Any] = decoder_ffn_dim
lowercase : Optional[int] = decoder_layers
lowercase : int = decoder_attention_heads
lowercase : Any = dropout
lowercase : int = attention_dropout
lowercase : Dict = activation_dropout
lowercase : int = activation_function
lowercase : Dict = init_std
lowercase : List[str] = init_xavier_std
lowercase : Optional[Any] = encoder_layerdrop
lowercase : Tuple = auxiliary_loss
lowercase : Tuple = position_embedding_type
# deformable attributes
lowercase : List[str] = num_feature_levels
lowercase : Tuple = encoder_n_points
lowercase : Optional[int] = decoder_n_points
lowercase : Tuple = two_stage
lowercase : Optional[Any] = two_stage_num_proposals
lowercase : Union[str, Any] = with_box_refine
lowercase : Any = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
lowercase : Optional[Any] = class_cost
lowercase : str = bbox_cost
lowercase : List[Any] = giou_cost
# Loss coefficients
lowercase : Tuple = mask_loss_coefficient
lowercase : Any = dice_loss_coefficient
lowercase : Dict = bbox_loss_coefficient
lowercase : Tuple = giou_loss_coefficient
lowercase : Union[str, Any] = eos_coefficient
lowercase : Tuple = focal_alpha
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def __lowerCamelCase ( self ):
return self.encoder_attention_heads
@property
def __lowerCamelCase ( self ):
return self.d_model
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase : Any = self.backbone_config.to_dict()
lowercase : List[str] = self.__class__.model_type
return output
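# Hedged usage sketch (assumes the class keeps its upstream name, DetaConfig):
#   config = DetaConfig(two_stage=True, with_box_refine=True)
#   config.num_attention_heads  # resolved to encoder_attention_heads via attribute_map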
| 337 | 1 |
class TrieNode:
    def __init__( self ):
        self.nodes: dict[str, TrieNode] = {} # Mapping from char to TrieNode
        self.is_leaf = False
    def insert_many( self , words ):
        for word in words:
            self.insert(word )
    def insert( self , word ):
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True
    def find( self , word ):
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf
    def delete( self , word ):
        def _delete(curr , word , index ) -> bool:
            if index == len(word ):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes ) == 0
            char = word[index]
            char_node = curr.nodes.get(char )
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node , word , index + 1 )
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes ) == 0
            return delete_curr
        _delete(self , word , 0 )
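    # _delete recurses to the end of `word`, clears is_leaf there, and on the way back
    # up prunes every node whose child subtree has become empty, freeing unused branches.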
def print_words(node, word ):
    """simple docstring"""
    if node.is_leaf:
        print(word, end=''' ''' )
    for key, value in node.nodes.items():
        print_words(value, word + key )
def test_trie():
    """simple docstring"""
    words = '''banana bananas bandana band apple all beast'''.split()
    root = TrieNode()
    root.insert_many(words )
    # print_words(root, "")
    assert all(root.find(word ) for word in words )
    assert root.find('''banana''' )
    assert not root.find('''bandanas''' )
    assert not root.find('''apps''' )
    assert root.find('''apple''' )
    assert root.find('''all''' )
    root.delete('''all''' )
    assert not root.find('''all''' )
    root.delete('''banana''' )
    assert not root.find('''banana''' )
    assert root.find('''bananas''' )
    return True
def print_results(msg, passes ):
    """simple docstring"""
    print(str(msg ), '''works!''' if passes else '''doesn\'t work :(''' )
def pytests():
    """simple docstring"""
    assert test_trie()
def main():
    """simple docstring"""
    print_results('''Testing trie functionality''', test_trie() )
if __name__ == "__main__":
main()
| 337 |
def solution():
    """Project Euler 48: return the last ten digits of 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1001 ):
        total += i**i
    return str(total )[-10:]
if __name__ == "__main__":
print(solution())
| 337 | 1 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length = 8 ):
    """simple docstring"""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars ) for _ in range(length ) )
def alternative_password_generator(chars_incl, i ):
    """simple docstring"""
    i -= len(chars_incl )
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder )
        + random(digits, quotient )
        + random(punctuation, quotient )
    )
    list_of_chars = list(chars )
    shuffle(list_of_chars )
    return "".join(list_of_chars )
# random is a generalised function for letters, characters and numbers
def random(chars_incl, i ):
    """simple docstring"""
    return "".join(secrets.choice(chars_incl ) for _ in range(i ) )
def random_number(chars_incl, i ):
    """simple docstring"""
    pass # Put your code here...
def random_letters(chars_incl, i ):
    """simple docstring"""
    pass # Put your code here...
def random_characters(chars_incl, i ):
    """simple docstring"""
    pass # Put your code here...
def is_strong_password(password, min_length = 8 ):
    """simple docstring"""
    if len(password ) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password )
    lower = any(char in ascii_lowercase for char in password )
    num = any(char in digits for char in password )
    spec_char = any(char in punctuation for char in password )
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def main():
    """simple docstring"""
    max_length = int(input('''Please indicate the max length of your password: ''' ).strip() )
    chars_incl = input(
        '''Please indicate the characters that must be in your password: ''' ).strip()
    print('''Password generated:''', password_generator(max_length ) )
    print(
        '''Alternative Password generated:''', alternative_password_generator(chars_incl, max_length ), )
    print('''[If you are thinking of using this password, you had better save it.]''' )
if __name__ == "__main__":
main()
| 337 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self ):
        self.diffusers_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
lowercase : Any = self.diffusers_dir
shutil.copy(
            os.path.join(git_repo_path , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def __lowerCamelCase ( self ):
lowercase : List[Any] = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
lowercase : Tuple = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowercase : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        lowercase : Any = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 )
lowercase : List[Any] = black.format_str(SCREAMING_SNAKE_CASE__ , mode=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' , newline='''\n''' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(SCREAMING_SNAKE_CASE__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f:
self.assertTrue(f.read() , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Tuple = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , )
# Copy consistency with a really long name
lowercase : List[Any] = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , overwrite_result=re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , )
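    # These tests drive utils/check_copies.py: is_copy_consistent() returns the list of
    # detected divergences for a file (empty when every `# Copied from ...` block still
    # matches its source) and rewrites the file in place when called with overwrite=True.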
| 337 | 1 |
class PrefixSum:
    def __init__( self , array ):
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum( self , start , end ):
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum_of_basis( self , target_sum ):
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
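    # contains_sum_of_basis is an O(n) contiguous-subarray-sum check: a slice summing to
    # target_sum exists iff two prefix sums differ by exactly target_sum, hence the
    # running set of seen prefix sums, seeded with 0 for slices starting at index 0.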
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 |
import math
class Graph:
    def __init__( self , n=0 ): # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ] # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ] # dp[i][j] stores minimum distance from i to j
    def add_edge( self , u , v , w ):
        self.dp[u][v] = w
    def floyd_warshall( self ):
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
    def show_min( self , u , v ):
        return self.dp[u][v]
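    # floyd_warshall relaxes every (i, j) pair through every intermediate vertex k, i.e.
    # O(n^3) time and O(n^2) space; afterwards dp[u][v] holds the shortest u -> v distance.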
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 337 | 1 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
    args = parser.parse_args()
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )
    img2img.save_pretrained(args.dump_path)
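    # Example invocation (a sketch; the script filename and dump path are illustrative):
    #   python convert_unclip_txt2img_to_image_variation.py \
    #       --txt2img_unclip kakaobrain/karlo-v1-alpha \
    #       --dump_path ./karlo-image-variation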
| 337 |
from __future__ import annotations
def __lowercase ( _UpperCamelCase ) ->float:
"""simple docstring"""
    if not _UpperCamelCase:
raise ValueError('''List is empty''' )
return sum(_UpperCamelCase ) / len(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 1 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Union[str, Any] = ['image_processor']
A : Optional[Any] = 'SamImageProcessor'
def __init__( self , SCREAMING_SNAKE_CASE__ ):
super().__init__(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = self.image_processor
lowercase : List[Any] = -10
lowercase : Dict = self.image_processor.size['''longest_edge''']
def __call__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
lowercase : Optional[Any] = self.image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
# pop arguments that are not used in the foward but used nevertheless
lowercase : List[Any] = encoding_image_processor['''original_sizes''']
if hasattr(SCREAMING_SNAKE_CASE__ , '''numpy''' ): # Checks if Torch or TF tensor
lowercase : List[str] = original_sizes.numpy()
lowercase , lowercase , lowercase : List[Any] = self._check_and_preprocess_points(
input_points=SCREAMING_SNAKE_CASE__ , input_labels=SCREAMING_SNAKE_CASE__ , input_boxes=SCREAMING_SNAKE_CASE__ , )
lowercase : str = self._normalize_and_convert(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , input_points=SCREAMING_SNAKE_CASE__ , input_labels=SCREAMING_SNAKE_CASE__ , input_boxes=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , )
return encoding_image_processor
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="pt" , ):
if input_points is not None:
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[Any] = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE__ , original_sizes[0] ) for point in input_points
]
else:
lowercase : Optional[int] = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for point, original_size in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
lowercase , lowercase : Any = self._pad_points_and_labels(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Any = np.array(SCREAMING_SNAKE_CASE__ )
if input_labels is not None:
lowercase : Union[str, Any] = np.array(SCREAMING_SNAKE_CASE__ )
if input_boxes is not None:
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
lowercase : int = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE__ , original_sizes[0] , is_bounding_box=SCREAMING_SNAKE_CASE__ )
for box in input_boxes
]
else:
lowercase : int = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , is_bounding_box=SCREAMING_SNAKE_CASE__ )
for box, original_size in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
]
lowercase : Any = np.array(SCREAMING_SNAKE_CASE__ )
if input_boxes is not None:
if return_tensors == "pt":
lowercase : int = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
# boxes batch size of 1 by default
lowercase : List[str] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
lowercase : Any = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
# boxes batch size of 1 by default
lowercase : Optional[int] = tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'''input_boxes''': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
lowercase : Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
# point batch size of 1 by default
lowercase : Union[str, Any] = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
lowercase : Any = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
# point batch size of 1 by default
lowercase : Dict = tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'''input_points''': input_points} )
if input_labels is not None:
if return_tensors == "pt":
lowercase : Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
# point batch size of 1 by default
lowercase : Dict = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
lowercase : str = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
# point batch size of 1 by default
lowercase : Tuple = tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'''input_labels''': input_labels} )
return encoding_image_processor
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[int] = max([point.shape[0] for point in input_points] )
lowercase : str = []
for i, point in enumerate(SCREAMING_SNAKE_CASE__ ):
if point.shape[0] != expected_nb_points:
lowercase : Optional[int] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
lowercase : Union[str, Any] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(SCREAMING_SNAKE_CASE__ )
lowercase : Dict = processed_input_points
return input_points, input_labels
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
lowercase , lowercase : Union[str, Any] = original_size
lowercase , lowercase : List[Any] = self.image_processor._get_preprocess_shape(SCREAMING_SNAKE_CASE__ , longest_edge=SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = deepcopy(SCREAMING_SNAKE_CASE__ ).astype(SCREAMING_SNAKE_CASE__ )
if is_bounding_box:
lowercase : Any = coords.reshape(-1 , 2 , 2 )
lowercase : Any = coords[..., 0] * (new_w / old_w)
lowercase : List[Any] = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
lowercase : Union[str, Any] = coords.reshape(-1 , 4 )
return coords
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , ):
if input_points is not None:
if hasattr(SCREAMING_SNAKE_CASE__ , '''numpy''' ): # Checks for TF or Torch tensor
lowercase : Tuple = input_points.numpy().tolist()
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or not isinstance(input_points[0] , SCREAMING_SNAKE_CASE__ ):
                raise ValueError('''Input points must be a list of lists of floating point numbers.''' )
lowercase : Optional[Any] = [np.array(SCREAMING_SNAKE_CASE__ ) for input_point in input_points]
else:
lowercase : Dict = None
if input_labels is not None:
if hasattr(SCREAMING_SNAKE_CASE__ , '''numpy''' ):
lowercase : str = input_labels.numpy().tolist()
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or not isinstance(input_labels[0] , SCREAMING_SNAKE_CASE__ ):
                raise ValueError('''Input labels must be a list of lists of integers.''' )
lowercase : Tuple = [np.array(SCREAMING_SNAKE_CASE__ ) for label in input_labels]
else:
lowercase : Any = None
if input_boxes is not None:
if hasattr(SCREAMING_SNAKE_CASE__ , '''numpy''' ):
lowercase : Tuple = input_boxes.numpy().tolist()
if (
not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
or not isinstance(input_boxes[0] , SCREAMING_SNAKE_CASE__ )
or not isinstance(input_boxes[0][0] , SCREAMING_SNAKE_CASE__ )
):
                raise ValueError('''Input boxes must be a list of lists of lists of floating point numbers.''' )
lowercase : List[str] = [np.array(SCREAMING_SNAKE_CASE__ ).astype(np.floataa ) for box in input_boxes]
else:
lowercase : List[Any] = None
return input_points, input_labels, input_boxes
@property
def __lowerCamelCase ( self ):
lowercase : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(SCREAMING_SNAKE_CASE__ ) )
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
return self.image_processor.post_process_masks(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
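# A minimal, self-contained sketch of the coordinate normalisation applied above:
# prompt points given in the original image frame are rescaled into the frame of
# the resized image, whose longest edge matches target_size. Names below are ours,
# for illustration only (the real preprocess also rounds the resized shape to ints).
import numpy as np

def normalize_coordinates_sketch(target_size, coords, original_size):
    old_h, old_w = original_size
    scale = target_size / max(old_h, old_w)  # the longest edge is resized to target_size
    new_h, new_w = old_h * scale, old_w * scale
    coords = coords.astype(float)
    coords[..., 0] *= new_w / old_w  # x coordinates
    coords[..., 1] *= new_h / old_h  # y coordinates
    return coords

# e.g. a point at (300, 150) in a 600x900 image with target_size=1024 lands at
# roughly (341.3, 170.7), since both axes scale by 1024/900 here.
print(normalize_coordinates_sketch(1024, np.array([[300.0, 150.0]]), (600, 900)))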
| 337 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__a = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
warnings.warn(
'''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DeiTImageProcessor instead.''' , SCREAMING_SNAKE_CASE__ , )
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 337 | 1 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=18 , SCREAMING_SNAKE_CASE__=30 , SCREAMING_SNAKE_CASE__=400 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__=[0.5, 0.5, 0.5] , ):
lowercase : int = size if size is not None else {'''height''': 18, '''width''': 18}
lowercase : Tuple = parent
lowercase : int = batch_size
lowercase : Optional[Any] = num_channels
lowercase : Tuple = image_size
lowercase : int = min_resolution
lowercase : str = max_resolution
lowercase : Dict = do_resize
lowercase : List[Any] = size
lowercase : List[str] = do_normalize
lowercase : Any = image_mean
lowercase : Union[str, Any] = image_std
def __lowerCamelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : str = DPTImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
lowercase : Optional[int] = DPTImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
lowercase : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''image_mean''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''image_std''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_resize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''size''' ) )
def __lowerCamelCase ( self ):
lowercase : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
lowercase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def __lowerCamelCase ( self ):
# Initialize image_processing
lowercase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
lowercase : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
lowercase : List[Any] = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
lowercase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
# Test not batched input
lowercase : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
lowercase : List[str] = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
lowercase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
lowercase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
lowercase : Any = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 337 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
__a = logging.get_logger(__name__)
def __lowercase ( _UpperCamelCase ) ->List[int]:
"""simple docstring"""
if isinstance(_UpperCamelCase, np.ndarray ):
return list(tensor.shape )
    # fall back to the dynamic (graph-time) shape for any dimension that is statically unknown
    lowercase : Optional[Any] = tf.shape(_UpperCamelCase )
if tensor.shape == tf.TensorShape(_UpperCamelCase ):
return dynamic
lowercase : Tuple = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(_UpperCamelCase )]
def __lowercase ( _UpperCamelCase, _UpperCamelCase = None, _UpperCamelCase = None ) ->tf.Tensor:
"""simple docstring"""
return tf.nn.softmax(logits=logits + 1e-9, axis=_UpperCamelCase, name=_UpperCamelCase )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=1e-5, _UpperCamelCase=-1 ) ->int:
"""simple docstring"""
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_UpperCamelCase, _UpperCamelCase ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
lowercase , lowercase : Union[str, Any] = tf.nn.moments(_UpperCamelCase, axes=[axis], keepdims=_UpperCamelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
lowercase : int = [1] * inputs.shape.rank
lowercase : Union[str, Any] = shape_list(_UpperCamelCase )[axis]
lowercase : List[str] = tf.reshape(_UpperCamelCase, _UpperCamelCase )
lowercase : Dict = tf.reshape(_UpperCamelCase, _UpperCamelCase )
# Compute layer normalization using the batch_normalization
# function.
lowercase : List[str] = tf.nn.batch_normalization(
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, offset=_UpperCamelCase, scale=_UpperCamelCase, variance_epsilon=_UpperCamelCase, )
return outputs
def __lowercase ( _UpperCamelCase, _UpperCamelCase=0, _UpperCamelCase=-1 ) ->List[Any]:
"""simple docstring"""
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
lowercase : Dict = tf.shape(_UpperCamelCase )
lowercase : Optional[Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
lowercase : List[str] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0 )
return tf.reshape(_UpperCamelCase, _UpperCamelCase )
def __lowercase ( _UpperCamelCase ) ->tf.Tensor:
"""simple docstring"""
if not isinstance(_UpperCamelCase, tf.Tensor ):
lowercase : Optional[Any] = tf.convert_to_tensor(_UpperCamelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
lowercase : Tuple = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
lowercase : List[Any] = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
lowercase : str = (
tf.cast(1, encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = "input_ids" ) ->None:
"""simple docstring"""
tf.debugging.assert_less(
_UpperCamelCase, tf.cast(_UpperCamelCase, dtype=tensor.dtype ), message=(
f"""The maximum value of {tensor_name} ({tf.math.reduce_max(_UpperCamelCase )}) must be smaller than the embedding """
f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
), )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
    lowercase : List[Any] = 64512  # HDF5 caps the bytes an object header (attribute block) may hold
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
lowercase : Optional[int] = [x for x in data if len(_UpperCamelCase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
f"""bytes: {bad_attributes}""" )
lowercase : Any = np.asarray(_UpperCamelCase )
lowercase : List[Any] = 1
lowercase : Tuple = np.array_split(_UpperCamelCase, _UpperCamelCase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
lowercase : Dict = np.array_split(_UpperCamelCase, _UpperCamelCase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(_UpperCamelCase ):
lowercase : Optional[int] = chunk_data
else:
lowercase : int = data
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->List[str]:
"""simple docstring"""
if name in group.attrs:
lowercase : str = [n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs[name]]
else:
lowercase : Optional[Any] = []
lowercase : List[str] = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def __lowercase ( _UpperCamelCase ) ->List[str]:
"""simple docstring"""
def _expand_single_ad_tensor(_UpperCamelCase ):
if isinstance(_UpperCamelCase, tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(_UpperCamelCase, axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor, _UpperCamelCase )
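# A minimal, self-contained sketch of the static/dynamic shape pattern implemented
# above (our own names): the mixed list lets downstream code keep Python ints for
# dimensions known at trace time and scalar tensors for those that are not.
import tensorflow as tf

def shape_list_sketch(tensor):
    dynamic = tf.shape(tensor)
    static = tensor.shape.as_list()
    return [dynamic[i] if dim is None else dim for i, dim in enumerate(static)]

@tf.function(input_signature=[tf.TensorSpec(shape=[None, 8], dtype=tf.float32)])
def demo(x):
    batch, width = shape_list_sketch(x)  # batch: scalar tensor, width: the Python int 8
    return tf.reshape(x, [batch, width])

print(demo(tf.zeros([3, 8])).shape)  # (3, 8)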
| 337 | 1 |
def __lowercase ( _UpperCamelCase, _UpperCamelCase = " " ) ->list:
"""simple docstring"""
lowercase : Tuple = []
lowercase : str = 0
for index, char in enumerate(_UpperCamelCase ):
if char == separator:
split_words.append(string[last_index:index] )
lowercase : Dict = index + 1
elif index + 1 == len(_UpperCamelCase ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
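# A clean, self-contained restatement of the manual split above (our own names):
# the scan cuts at every separator and flushes the final word when the end of the
# string is reached.
def split_words_sketch(string: str, separator: str = " ") -> list:
    split_words, last_index = [], 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words

assert split_words_sketch("innovation in tech") == ["innovation", "in", "tech"]
assert split_words_sketch("a,b,c", ",") == ["a", "b", "c"]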
| 337 |
def __lowercase ( _UpperCamelCase = 4000000 ) ->int:
"""simple docstring"""
lowercase : int = []
lowercase , lowercase : str = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(_UpperCamelCase )
lowercase , lowercase : Dict = b, a + b
return sum(_UpperCamelCase )
if __name__ == "__main__":
print(F'''{solution() = }''')
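# Worked example: for n = 100 the terms generated are 1, 1, 2, 3, 5, 8, 13, 21,
# 34, 55, 89; the even ones are 2, 8 and 34, so the sum is 44. A self-contained
# check of the same recurrence:
def even_fib_sum_sketch(n: int) -> int:
    total, a, b = 0, 0, 1
    while b <= n:
        if b % 2 == 0:
            total += b
        a, b = b, a + b
    return total

assert even_fib_sum_sketch(100) == 44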
| 337 | 1 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
__a = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
__a = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = 'maskformer'
A : Tuple = {'hidden_size': 'mask_feature_size'}
A : List[str] = ['resnet', 'swin']
A : List[Any] = ['detr']
def __init__( self , SCREAMING_SNAKE_CASE__ = 256 , SCREAMING_SNAKE_CASE__ = 256 , SCREAMING_SNAKE_CASE__ = 0.1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 0.02 , SCREAMING_SNAKE_CASE__ = 1.0 , SCREAMING_SNAKE_CASE__ = 1.0 , SCREAMING_SNAKE_CASE__ = 1.0 , SCREAMING_SNAKE_CASE__ = 20.0 , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
lowercase : Dict = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = backbone_config.pop('''model_type''' )
lowercase : List[Any] = CONFIG_MAPPING[backbone_model_type]
lowercase : str = config_class.from_dict(SCREAMING_SNAKE_CASE__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
f"""Supported model types: {','.join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
lowercase : str = DetrConfig()
else:
# verify that the decoder is supported
lowercase : Optional[int] = (
decoder_config.pop('''model_type''' ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f"""Transformer Decoder {decoder_type} not supported, please use one of"""
f""" {','.join(self.decoders_supported )}""" )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[int] = CONFIG_MAPPING[decoder_type]
lowercase : Dict = config_class.from_dict(SCREAMING_SNAKE_CASE__ )
lowercase : Dict = backbone_config
lowercase : int = decoder_config
# main feature dimension for the model
lowercase : int = fpn_feature_size
lowercase : Dict = mask_feature_size
# initializer
lowercase : str = init_std
lowercase : Optional[int] = init_xavier_std
# Hungarian matcher && loss
lowercase : List[Any] = cross_entropy_weight
lowercase : List[Any] = dice_weight
lowercase : Optional[int] = mask_weight
lowercase : Optional[int] = use_auxiliary_loss
lowercase : Any = no_object_weight
lowercase : List[str] = output_auxiliary_logits
lowercase : List[Any] = self.decoder_config.encoder_attention_heads
lowercase : int = self.decoder_config.num_hidden_layers
super().__init__(**SCREAMING_SNAKE_CASE__ )
@classmethod
def __lowerCamelCase ( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
return cls(
backbone_config=SCREAMING_SNAKE_CASE__ , decoder_config=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
def __lowerCamelCase ( self ):
lowercase : str = copy.deepcopy(self.__dict__ )
lowercase : List[Any] = self.backbone_config.to_dict()
lowercase : Any = self.decoder_config.to_dict()
lowercase : List[Any] = self.__class__.model_type
return output
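# A usage sketch for the composite configuration above, assuming the upstream
# transformers API (class and helper names as published there):
from transformers import DetrConfig, MaskFormerConfig, SwinConfig

# defaults: a Swin backbone and a DETR decoder are instantiated automatically
config = MaskFormerConfig()

# or compose explicitly from sub-configs via the classmethod defined above
config = MaskFormerConfig.from_backbone_and_decoder_configs(
    backbone_config=SwinConfig(), decoder_config=DetrConfig()
)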
| 337 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__a = logging.get_logger(__name__)
__a = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = 'perceiver'
def __init__( self , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=1280 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=26 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="kv" , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=262 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=56 , SCREAMING_SNAKE_CASE__=[368, 496] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=1920 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=[1, 16, 224, 224] , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : Any = num_latents
lowercase : Union[str, Any] = d_latents
lowercase : str = d_model
lowercase : int = num_blocks
lowercase : str = num_self_attends_per_block
lowercase : List[str] = num_self_attention_heads
lowercase : List[str] = num_cross_attention_heads
lowercase : int = qk_channels
lowercase : List[Any] = v_channels
lowercase : int = cross_attention_shape_for_attention
lowercase : Tuple = self_attention_widening_factor
lowercase : Dict = cross_attention_widening_factor
lowercase : Any = hidden_act
lowercase : Optional[Any] = attention_probs_dropout_prob
lowercase : Union[str, Any] = initializer_range
lowercase : Any = layer_norm_eps
lowercase : Any = use_query_residual
# masked language modeling attributes
lowercase : List[str] = vocab_size
lowercase : Dict = max_position_embeddings
# image classification attributes
lowercase : int = image_size
# flow attributes
lowercase : List[Any] = train_size
# multimodal autoencoding attributes
lowercase : List[Any] = num_frames
lowercase : Union[str, Any] = audio_samples_per_frame
lowercase : int = samples_per_patch
lowercase : Optional[int] = output_shape
class __SCREAMING_SNAKE_CASE ( A__ ):
@property
def __lowerCamelCase ( self ):
if self.task == "multiple-choice":
lowercase : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase : Dict = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''inputs''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
@property
def __lowerCamelCase ( self ):
return 1E-4
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 3 , SCREAMING_SNAKE_CASE__ = 40 , SCREAMING_SNAKE_CASE__ = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase : str = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase : Union[str, Any] = preprocessor.num_special_tokens_to_add(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE__ )
# Generate dummy inputs according to compute batch and sequence
lowercase : Optional[Any] = [''' '''.join(['''a'''] ) * seq_length] * batch_size
lowercase : Any = dict(preprocessor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) )
lowercase : Union[str, Any] = inputs.pop('''input_ids''' )
return inputs
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase : List[str] = compute_effective_axis_dimension(SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch )
lowercase : List[str] = self._generate_dummy_images(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = dict(preprocessor(images=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) )
lowercase : Union[str, Any] = inputs.pop('''pixel_values''' )
return inputs
else:
raise ValueError(
'''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
| 337 | 1 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__a = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Optional[int]:
"""simple docstring"""
if got_ver is None or want_ver is None:
raise ValueError(
f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
f""" reinstalling {pkg}.""" )
if not ops[op](version.parse(_UpperCamelCase ), version.parse(_UpperCamelCase ) ):
raise ImportError(
f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->None:
"""simple docstring"""
lowercase : List[Any] = f"""\n{hint}""" if hint is not None else ''''''
# non-versioned check
if re.match(R'''^[\w_\-\d]+$''', _UpperCamelCase ):
lowercase , lowercase , lowercase : Optional[Any] = requirement, None, None
else:
lowercase : List[Any] = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''', _UpperCamelCase )
if not match:
raise ValueError(
                '''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but'''
f""" got {requirement}""" )
lowercase , lowercase : str = match[0]
lowercase : Tuple = want_full.split(''',''' ) # there could be multiple requirements
lowercase : List[Any] = {}
for w in want_range:
lowercase : str = re.findall(R'''^([\s!=<>]{1,2})(.+)''', _UpperCamelCase )
if not match:
raise ValueError(
                '''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,'''
f""" but got {requirement}""" )
lowercase , lowercase : Optional[int] = match[0]
lowercase : Dict = want_ver
if op not in ops:
raise ValueError(f"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
# special case
if pkg == "python":
lowercase : int = '''.'''.join([str(_UpperCamelCase ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
return
# check if any version is installed
try:
lowercase : List[str] = importlib.metadata.version(_UpperCamelCase )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
def __lowercase ( _UpperCamelCase ) ->int:
"""simple docstring"""
lowercase : Optional[int] = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
return require_version(_UpperCamelCase, _UpperCamelCase )
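# The core comparison the checker performs, as a self-contained sketch (the real
# entry points are require_version and require_version_core, referenced above):
from packaging import version as _version
import operator as _operator

_ops = {"<": _operator.lt, "<=": _operator.le, "==": _operator.eq,
        "!=": _operator.ne, ">=": _operator.ge, ">": _operator.gt}

def satisfies(got_ver: str, op: str, want_ver: str) -> bool:
    return _ops[op](_version.parse(got_ver), _version.parse(want_ver))

assert satisfies("1.24.0", ">=", "1.17")        # e.g. numpy>=1.17 would pass
assert not satisfies("0.11.3", "!=", "0.11.3")  # a pinned-out version fails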
| 337 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def __lowercase ( _UpperCamelCase = 8 ) ->str:
"""simple docstring"""
lowercase : List[str] = ascii_letters + digits + punctuation
return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) )
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->str:
"""simple docstring"""
i -= len(_UpperCamelCase )
lowercase : Dict = i // 3
lowercase : List[str] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
lowercase : Union[str, Any] = (
chars_incl
+ random(_UpperCamelCase, quotient + remainder )
+ random(_UpperCamelCase, _UpperCamelCase )
+ random(_UpperCamelCase, _UpperCamelCase )
)
lowercase : Union[str, Any] = list(_UpperCamelCase )
shuffle(_UpperCamelCase )
return "".join(_UpperCamelCase )
# random is a generalised function for letters, characters and numbers
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->str:
"""simple docstring"""
return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) )
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Dict:
    """simple docstring"""
    # minimal implementation, mirroring the generalised helper below: draw random
    # characters from the given pool (pool first, count second)
    return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) )
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]:
    """simple docstring"""
    # minimal implementation: draw random characters from the given pool
    return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) )
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->List[Any]:
    """simple docstring"""
    # minimal implementation: draw random characters from the given pool
    return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) )
def __lowercase ( _UpperCamelCase, _UpperCamelCase = 8 ) ->bool:
"""simple docstring"""
if len(_UpperCamelCase ) < min_length:
        # Your password must be at least 8 characters long
return False
lowercase : str = any(char in ascii_uppercase for char in password )
lowercase : List[str] = any(char in ascii_lowercase for char in password )
lowercase : Dict = any(char in digits for char in password )
lowercase : Tuple = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def __lowercase ( ) ->Dict:
"""simple docstring"""
lowercase : Union[str, Any] = int(input('''Please indicate the max length of your password: ''' ).strip() )
lowercase : Optional[Any] = input(
'''Please indicate the characters that must be in your password: ''' ).strip()
print('''Password generated:''', password_generator(_UpperCamelCase ) )
print(
'''Alternative Password generated:''', alternative_password_generator(_UpperCamelCase, _UpperCamelCase ), )
    print('''[If you are thinking of using this password, you had better save it.]''' )
if __name__ == "__main__":
main()
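# Worked example of the length split in the alternative generator: for a requested
# length i = 16 with chars_incl = "@1", i becomes 14 after subtracting len(chars_incl),
# so quotient = 14 // 3 = 4 and remainder = 14 % 3 = 2; the password then mixes the
# 2 fixed characters + 6 letters + 4 digits + 4 punctuation marks before shuffling.
# The strength check above can be verified on a hand-written sample:
sample = "aB3$dE5&gH7*jK9!"
assert len(sample) >= 8
assert any(c.isupper() for c in sample) and any(c.islower() for c in sample)
assert any(c.isdigit() for c in sample) and any(not c.isalnum() for c in sample)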
| 337 | 1 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=17 , SCREAMING_SNAKE_CASE__=23 , SCREAMING_SNAKE_CASE__=11 , SCREAMING_SNAKE_CASE__=True , ):
lowercase : str = parent
lowercase : Dict = batch_size
lowercase : List[str] = seq_length
lowercase : Optional[Any] = act_dim
lowercase : Optional[int] = state_dim
lowercase : Any = hidden_size
lowercase : List[Any] = max_length
lowercase : List[Any] = is_training
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowercase : List[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowercase : int = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase : str = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase : int = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
lowercase : Optional[Any] = random_attention_mask((self.batch_size, self.seq_length) )
lowercase : Dict = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def __lowerCamelCase ( self ):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ):
lowercase : Tuple = DecisionTransformerModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowercase : Optional[Any] = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length * 3 as there are 3 modalities: states, returns and actions
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = self.prepare_config_and_inputs()
        lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase : str = config_and_inputs
lowercase : Optional[int] = {
'''states''': states,
'''actions''': actions,
'''rewards''': rewards,
'''returns_to_go''': returns_to_go,
'''timesteps''': timesteps,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ):
A : List[str] = (DecisionTransformerModel,) if is_torch_available() else ()
A : int = ()
A : str = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
A : Optional[int] = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
A : List[str] = False
A : Optional[int] = False
A : List[Any] = False
A : Any = False
A : List[Any] = False
A : Dict = False
A : str = False
A : Any = False
A : Dict = False
def __lowerCamelCase ( self ):
lowercase : Optional[int] = DecisionTransformerModelTester(self )
lowercase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
@slow
def __lowerCamelCase ( self ):
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : List[Any] = DecisionTransformerModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase , lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : int = model_class(SCREAMING_SNAKE_CASE__ )
lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : Optional[int] = [*signature.parameters.keys()]
lowercase : Optional[int] = [
'''states''',
'''actions''',
'''rewards''',
'''returns_to_go''',
'''timesteps''',
'''attention_mask''',
]
self.assertListEqual(arg_names[: len(SCREAMING_SNAKE_CASE__ )] , SCREAMING_SNAKE_CASE__ )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def __lowerCamelCase ( self ):
lowercase : str = 2 # number of steps of autoregressive prediction we will perform
lowercase : Tuple = 10 # defined by the RL environment, may be normalized
lowercase : Optional[int] = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''' )
lowercase : Dict = model.to(SCREAMING_SNAKE_CASE__ )
lowercase : Dict = model.config
torch.manual_seed(0 )
lowercase : List[Any] = torch.randn(1 , 1 , config.state_dim ).to(device=SCREAMING_SNAKE_CASE__ , dtype=torch.floataa ) # env.reset()
lowercase : Tuple = torch.tensor(
[[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = torch.tensor(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
lowercase : Dict = state
lowercase : Optional[int] = torch.zeros(1 , 0 , config.act_dim , device=SCREAMING_SNAKE_CASE__ , dtype=torch.floataa )
lowercase : List[str] = torch.zeros(1 , 0 , device=SCREAMING_SNAKE_CASE__ , dtype=torch.floataa )
lowercase : Union[str, Any] = torch.tensor(0 , device=SCREAMING_SNAKE_CASE__ , dtype=torch.long ).reshape(1 , 1 )
for step in range(SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=SCREAMING_SNAKE_CASE__ )] , dim=1 )
lowercase : Any = torch.cat([rewards, torch.zeros(1 , 1 , device=SCREAMING_SNAKE_CASE__ )] , dim=1 )
lowercase : Dict = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
lowercase , lowercase , lowercase : Dict = model(
states=SCREAMING_SNAKE_CASE__ , actions=SCREAMING_SNAKE_CASE__ , rewards=SCREAMING_SNAKE_CASE__ , returns_to_go=SCREAMING_SNAKE_CASE__ , timesteps=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
lowercase , lowercase , lowercase , lowercase : List[str] = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=SCREAMING_SNAKE_CASE__ , dtype=torch.floataa ),
1.0,
False,
{},
)
lowercase : Dict = action_pred[0, -1]
lowercase : Optional[Any] = torch.cat([states, state] , dim=1 )
lowercase : Tuple = returns_to_go[0, -1] - reward
lowercase : int = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
lowercase : Union[str, Any] = torch.cat(
[timesteps, torch.ones((1, 1) , device=SCREAMING_SNAKE_CASE__ , dtype=torch.long ) * (step + 1)] , dim=1 )
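# The seq_length * 3 hidden-state layout asserted earlier comes from interleaving
# the three modalities per timestep, (R_1, s_1, a_1, R_2, s_2, a_2, ...); a
# self-contained shape check of that stacking (dimensions are illustrative):
import torch

batch, seq, hidden = 2, 5, 4
returns = torch.zeros(batch, seq, hidden)
states = torch.zeros(batch, seq, hidden)
actions = torch.zeros(batch, seq, hidden)
stacked = (
    torch.stack((returns, states, actions), dim=1)  # (batch, 3, seq, hidden)
    .permute(0, 2, 1, 3)                            # (batch, seq, 3, hidden)
    .reshape(batch, 3 * seq, hidden)
)
assert stacked.shape == (batch, 3 * seq, hidden)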
| 337 |
from __future__ import annotations
__a = []
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->bool:
    """simple docstring"""
    # no queen already placed may share this row
    for i in range(len(_UpperCamelCase ) ):
        if board[row][i] == 1:
            return False
    # no queen already placed may share this column
    for i in range(len(_UpperCamelCase ) ):
        if board[i][column] == 1:
            return False
    # no queen on the upper-left diagonal
    for i, j in zip(range(_UpperCamelCase, -1, -1 ), range(_UpperCamelCase, -1, -1 ) ):
        if board[i][j] == 1:
            return False
    # no queen on the upper-right diagonal
    for i, j in zip(range(_UpperCamelCase, -1, -1 ), range(_UpperCamelCase, len(_UpperCamelCase ) ) ):
        if board[i][j] == 1:
            return False
    return True
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->bool:
    """simple docstring"""
    # every row holds a queen: record and print the completed board
    if row >= len(_UpperCamelCase ):
        solution.append(_UpperCamelCase )
        printboard(_UpperCamelCase )
        print()
        return True
    # try each column in this row: place a queen, recurse, then backtrack
    for i in range(len(_UpperCamelCase ) ):
        if is_safe(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase ):
            lowercase : int = 1
            solve(_UpperCamelCase, row + 1 )
            lowercase : Tuple = 0
    return False
def __lowercase ( _UpperCamelCase ) ->None:
"""simple docstring"""
for i in range(len(_UpperCamelCase ) ):
for j in range(len(_UpperCamelCase ) ):
if board[i][j] == 1:
print('''Q''', end=''' ''' )
else:
print('''.''', end=''' ''' )
print()
# n=int(input("The no. of queens"))
__a = 8
__a = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
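# Sanity check: the n-queens problem has 2 solutions for n = 4 and 92 for the
# default n = 8, so the print above should report 92. A compact self-contained
# counter over the same constraints (rows, columns and both diagonals):
def count_queens(n: int, cols=(), diag1=(), diag2=()) -> int:
    row = len(cols)
    if row == n:
        return 1
    return sum(
        count_queens(n, cols + (c,), diag1 + (row + c,), diag2 + (row - c,))
        for c in range(n)
        if c not in cols and row + c not in diag1 and row - c not in diag2
    )

assert count_queens(4) == 2 and count_queens(8) == 92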
| 337 | 1 |
from __future__ import annotations
def __lowercase ( _UpperCamelCase ) ->bool:
"""simple docstring"""
lowercase : str = str(_UpperCamelCase )
return len(_UpperCamelCase ) == 9 and set(_UpperCamelCase ) == set('''123456789''' )
def __lowercase ( ) ->int | None:
"""simple docstring"""
for base_num in range(9999, 4999, -1 ):
lowercase : Optional[Any] = 100002 * base_num
if is_9_pandigital(_UpperCamelCase ):
return candidate
for base_num in range(333, 99, -1 ):
lowercase : Union[str, Any] = 1002003 * base_num
if is_9_pandigital(_UpperCamelCase ):
return candidate
return None
if __name__ == "__main__":
print(F'''{solution() = }''')
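# Worked example (Project Euler 38): 9327 concatenated with 9327 * 2 gives
# "9327" + "18654" = "932718654", which uses each digit 1-9 exactly once, and
# multiplying the 4-digit base by 100002 produces exactly that concatenation:
# 9327 * 100002 = 932700000 + 18654 = 932718654. The descending search above
# therefore returns 932718654.
assert 9327 * 100002 == 932718654
assert set("932718654") == set("123456789")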
| 337 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__a = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
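# A minimal sketch of the lazy-import pattern used above (the real _LazyModule
# also tracks module specs and plain attributes): attribute access triggers the
# actual submodule import, so importing the package itself stays cheap.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        module = importlib.import_module(f".{self._class_to_module[attr]}", self.__name__)
        return getattr(module, attr)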
| 337 | 1 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
__a = True
except (ImportError, AttributeError):
__a = object
def __lowercase ( *_UpperCamelCase, **_UpperCamelCase ) ->str:
"""simple docstring"""
pass
__a = False
__a = logging.get_logger('''transformers-cli/serving''')
def __lowercase ( _UpperCamelCase ) ->Optional[int]:
"""simple docstring"""
lowercase : List[str] = pipeline(
task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
return ServeCommand(_UpperCamelCase, args.host, args.port, args.workers )
class __SCREAMING_SNAKE_CASE ( A__ ):
A : dict
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str]
A : Optional[List[int]]
class __SCREAMING_SNAKE_CASE ( A__ ):
A : str
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Any
class __SCREAMING_SNAKE_CASE ( A__ ):
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = parser.add_parser(
'''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
serve_parser.add_argument(
'''--task''' , type=SCREAMING_SNAKE_CASE__ , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , )
serve_parser.add_argument('''--host''' , type=SCREAMING_SNAKE_CASE__ , default='''localhost''' , help='''Interface the server will listen on.''' )
serve_parser.add_argument('''--port''' , type=SCREAMING_SNAKE_CASE__ , default=8888 , help='''Port the serving will listen to.''' )
serve_parser.add_argument('''--workers''' , type=SCREAMING_SNAKE_CASE__ , default=1 , help='''Number of http workers''' )
serve_parser.add_argument('''--model''' , type=SCREAMING_SNAKE_CASE__ , help='''Model\'s name or path to stored model.''' )
serve_parser.add_argument('''--config''' , type=SCREAMING_SNAKE_CASE__ , help='''Model\'s config name or path to stored model.''' )
serve_parser.add_argument('''--tokenizer''' , type=SCREAMING_SNAKE_CASE__ , help='''Tokenizer name to use.''' )
serve_parser.add_argument(
            '''--device''' , type=SCREAMING_SNAKE_CASE__ , default=-1 , help='''Indicate the device to run on: -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
serve_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = pipeline
lowercase : List[Any] = host
lowercase : str = port
lowercase : List[str] = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'''Using serve command requires FastAPI and uvicorn. '''
'''Please install transformers with [serving]: pip install "transformers[serving]".'''
'''Or install FastAPI and uvicorn separately.''' )
else:
logger.info(f"""Serving model over {host}:{port}""" )
lowercase : List[str] = FastAPI(
routes=[
APIRoute(
'''/''' , self.model_info , response_model=SCREAMING_SNAKE_CASE__ , response_class=SCREAMING_SNAKE_CASE__ , methods=['''GET'''] , ),
APIRoute(
'''/tokenize''' , self.tokenize , response_model=SCREAMING_SNAKE_CASE__ , response_class=SCREAMING_SNAKE_CASE__ , methods=['''POST'''] , ),
APIRoute(
'''/detokenize''' , self.detokenize , response_model=SCREAMING_SNAKE_CASE__ , response_class=SCREAMING_SNAKE_CASE__ , methods=['''POST'''] , ),
APIRoute(
'''/forward''' , self.forward , response_model=SCREAMING_SNAKE_CASE__ , response_class=SCREAMING_SNAKE_CASE__ , methods=['''POST'''] , ),
] , timeout=600 , )
def __lowerCamelCase ( self ):
run(self._app , host=self.host , port=self.port , workers=self.workers )
def __lowerCamelCase ( self ):
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ = Body(SCREAMING_SNAKE_CASE__ , embed=SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ = Body(SCREAMING_SNAKE_CASE__ , embed=SCREAMING_SNAKE_CASE__ ) ):
try:
lowercase : Optional[int] = self._pipeline.tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
if return_ids:
lowercase : Tuple = self._pipeline.tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
return ServeTokenizeResult(tokens=SCREAMING_SNAKE_CASE__ , tokens_ids=SCREAMING_SNAKE_CASE__ )
else:
return ServeTokenizeResult(tokens=SCREAMING_SNAKE_CASE__ )
except Exception as e:
raise HTTPException(status_code=500 , detail={'''model''': '''''', '''error''': str(SCREAMING_SNAKE_CASE__ )} )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ = Body(SCREAMING_SNAKE_CASE__ , embed=SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ = Body(SCREAMING_SNAKE_CASE__ , embed=SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ = Body(SCREAMING_SNAKE_CASE__ , embed=SCREAMING_SNAKE_CASE__ ) , ):
try:
lowercase : Optional[Any] = self._pipeline.tokenizer.decode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return ServeDeTokenizeResult(model='''''' , text=SCREAMING_SNAKE_CASE__ )
except Exception as e:
raise HTTPException(status_code=500 , detail={'''model''': '''''', '''error''': str(SCREAMING_SNAKE_CASE__ )} )
async def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__=Body(SCREAMING_SNAKE_CASE__ , embed=SCREAMING_SNAKE_CASE__ ) ):
# Check we don't have empty string
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
lowercase : Optional[Any] = self._pipeline(SCREAMING_SNAKE_CASE__ )
return ServeForwardResult(output=SCREAMING_SNAKE_CASE__ )
except Exception as e:
raise HTTPException(500 , {'''error''': str(SCREAMING_SNAKE_CASE__ )} )
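# Usage sketch for the command above via the transformers CLI (task and model
# values are illustrative; the JSON field name assumes the upstream signature
# behind the Body(...) parameters):
#   transformers-cli serve --task sentiment-analysis --host localhost --port 8888 --workers 1
#   curl http://localhost:8888/          # GET /  -> the model's config
#   curl -X POST http://localhost:8888/forward \
#        -H 'Content-Type: application/json' -d '{"inputs": "This movie was great!"}'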
| 337 |
from collections.abc import Callable
class __SCREAMING_SNAKE_CASE :
def __init__( self , SCREAMING_SNAKE_CASE__ = None ):
# Stores actual heap items.
lowercase : list = []
# Stores indexes of each item for supporting updates and deletion.
lowercase : dict = {}
# Stores current size of heap.
lowercase : str = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
lowercase : Tuple = key or (lambda SCREAMING_SNAKE_CASE__ : x)
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return int((i - 1) / 2 ) if i > 0 else None
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
        # First update the positions of the two items in the index map.
        lowercase , lowercase : Dict = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
lowercase , lowercase : int = self.arr[j], self.arr[i]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return self.arr[i][1] < self.arr[j][1]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : int = self._left(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = self._right(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = i
if left is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = left
if right is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = right
return valid_parent
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[int] = self._parent(SCREAMING_SNAKE_CASE__ )
while parent is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self._swap(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase , lowercase : Optional[int] = parent, self._parent(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = self._get_valid_parent(SCREAMING_SNAKE_CASE__ )
while valid_parent != index:
self._swap(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase , lowercase : str = valid_parent, self._get_valid_parent(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if item not in self.pos_map:
return
lowercase : str = self.pos_map[item]
lowercase : Optional[int] = [item, self.key(SCREAMING_SNAKE_CASE__ )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(SCREAMING_SNAKE_CASE__ )
self._heapify_down(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
if item not in self.pos_map:
return
lowercase : List[str] = self.pos_map[item]
del self.pos_map[item]
lowercase : Optional[int] = self.arr[self.size - 1]
lowercase : int = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(SCREAMING_SNAKE_CASE__ )
self._heapify_down(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : str = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(SCREAMING_SNAKE_CASE__ )] )
else:
lowercase : int = [item, self.key(SCREAMING_SNAKE_CASE__ )]
lowercase : str = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowerCamelCase ( self ):
return self.arr[0] if self.size else None
def __lowerCamelCase ( self ):
lowercase : str = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def __lowercase ( ) ->None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
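# A worked trace of the indexed heap above with key = identity (a min-heap);
# insert/update method names follow the upstream API that delete_item and
# get_top, used in the code above, belong to:
#   insert_item(5, 5); insert_item(2, 2); insert_item(9, 9)
#       -> arr = [[2, 2], [5, 5], [9, 9]], pos_map = {2: 0, 5: 1, 9: 2}
#   update_item(9, 1)   # re-keys item 9 to 1, so it bubbles up past the root
#       -> get_top() == [9, 1]
#   delete_item(2)      # the last element fills slot pos_map[2], then re-heapifies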
| 337 | 1 |
def __lowercase ( _UpperCamelCase = 4000000 ) ->int:
"""simple docstring"""
lowercase : int = []
lowercase , lowercase : str = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(_UpperCamelCase )
lowercase , lowercase : Dict = b, a + b
return sum(_UpperCamelCase )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 337 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 337 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
__a = None
__a = logging.get_logger(__name__)
__a = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__a = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
__a = {
'''albert-base-v1''': 5_12,
'''albert-large-v1''': 5_12,
'''albert-xlarge-v1''': 5_12,
'''albert-xxlarge-v1''': 5_12,
'''albert-base-v2''': 5_12,
'''albert-large-v2''': 5_12,
'''albert-xlarge-v2''': 5_12,
'''albert-xxlarge-v2''': 5_12,
}
__a = '''▁'''
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Union[str, Any] = VOCAB_FILES_NAMES
A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
A : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Union[str, Any] = AlbertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
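# --- Example (illustrative sketch, not part of the original module) ---
# Pair encoding follows the [CLS] A [SEP] B [SEP] convention implemented above;
# "albert-base-v2" is one of the public checkpoints listed in the map.
if __name__ == "__main__":
    tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
    encoded = tokenizer("first sentence", "second sentence")
    print(encoded.input_ids)
    print(encoded.token_type_ids)  # 0s for sequence A, 1s for sequence B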
| 337 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''facebook/blenderbot-3B''': 128}
class BlenderbotTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = '''post_processor'''
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['''sep'''] = tuple(state['''sep'''] )
            if "cls" in state:
                state['''cls'''] = tuple(state['''cls'''] )
            changes_to_apply = False
            if state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
                state['''add_prefix_space'''] = add_prefix_space
                changes_to_apply = True
            if state.get('''trim_offsets''' , trim_offsets ) != trim_offsets:
                state['''trim_offsets'''] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('''type''' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token( self ):
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ):
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ):
        return token_ids_a + [self.eos_token_id]
    def _build_conversation_input_ids( self , conversation ):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(''' ''' + text )
            else:
                # Generated responses should contain them already.
                inputs.append(text )
        full_string = '''  '''.join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
        return input_ids
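# --- Example (illustrative sketch, not part of the original module) ---
# Blenderbot expects a leading space before user turns, matching what
# _build_conversation_input_ids does above.
if __name__ == "__main__":
    tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
    print(tok(" Hello, how are you?").input_ids)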
| 337 | 1 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b ):
    """Compare two TensorProtos for equality while ignoring their names."""
    name_a = a.name
    name_b = b.name
    a.name = ''''''
    b.name = ''''''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name ):
    """Replace every input of `node_proto` equal to `name` with `new_name`, recursing into subgraphs."""
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i, new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name )
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name )
def _graph_replace_input_with(graph_proto, name, new_name ):
    """Apply `_node_replace_input_with` to every node of the graph."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name )
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace ):
    """Drop duplicate initializers and rewire all node inputs to the kept copy."""
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref )
def remove_dup_initializers(onnx_file_path ):
    """
    Removes duplicate initializers from an ONNX model to reduce its size,
    writes the optimized model next to the original and returns the new path.
    """
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder, model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('''unexpected data type: ''', dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print('''total reduced size: ''', total_reduced_size / 1024 / 1024 / 1024, '''GB''' )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model, model, ind_to_replace )
    optimized_model_file_name = '''optimized_''' + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name )
    onnx.save(model, new_model )
    return new_model
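# --- Example (illustrative sketch) ---
# "model.onnx" is a placeholder path; the function writes "optimized_model.onnx"
# next to it and returns that new path.
if __name__ == "__main__":
    optimized_path = remove_dup_initializers("model.onnx")
    print("wrote", optimized_path)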
| 337 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main() ->None:
    """Run the TensorFlow benchmark, translating deprecated `--no_*` flags into helpful errors."""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
        begin_error_msg = ''' '''.join(str(e ).split(''' ''' )[:-1] )
        full_error_msg = ''''''
        depreciated_args = eval(str(e ).split(''' ''' )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
    benchmark.run()
if __name__ == "__main__":
main()
| 337 | 1 |
from __future__ import annotations
class BoyerMooreSearch:
    def __init__( self , text , pattern ):
        self.text , self.pattern = text, pattern
        self.textLen , self.patLen = len(text ), len(pattern )
    def match_in_pattern( self , char ):
        for i in range(self.patLen - 1 , -1 , -1 ):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text( self , current_pos ):
        for i in range(self.patLen - 1 , -1 , -1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic( self ):
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                ) # shifting index lgtm [py/multiple-definition]
        return positions
text = '''ABAABA'''
pattern = '''AB'''
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
| 337 |
def bfs(graph, s, t, parent ):
    """Breadth-first search for an augmenting path; fills `parent` and reports whether t is reachable."""
    visited = [False] * len(graph )
    queue = []
    queue.append(s )
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson(graph, source, sink ):
    """Compute the maximum flow from source to sink; `graph` is mutated into the residual network."""
    parent = [-1] * (len(graph ))
    max_flow = 0
    while bfs(graph, source, sink, parent ):
        path_flow = float('''Inf''' )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
__a = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
__a , __a = 0, 5
print(ford_fulkerson(graph, source, sink))
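# For the classic CLRS capacity matrix above the printed maximum flow is 23
# (12 units along 0->1->3->5, 4 along 0->2->4->5 and 7 along 0->2->4->3->5).
# Note that ford_fulkerson mutates `graph` in place, leaving the residual
# network, so rerunning it on the same matrix would report 0.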
| 337 | 1 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor ):
    """Rename a flax key/tensor pair to the PyTorch naming scheme."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and '''.'''.join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path ):
    """Split a flattened checkpoint key into its real layer name, sub-key and tensorstore content."""
    if "metadata" in layer:
        split_layer = layer.split('''metadata''' )
        curr_real_layer_name = ''''''.join(split_layer[0] )[:-1]
        split_layer = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
    elif "kvstore" in layer:
        split_layer = layer.split('''kvstore''' )
        curr_real_layer_name = ''''''.join(split_layer[0] )[:-1]
        split_layer = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
    else:
        split_layer = layer.split('''/''' )
        curr_real_layer_name = '''/'''.join(split_layer[:-1] )
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        content = '''file'''
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path ):
    """Rename the keys of a block with the small-conversion-script pattern and save it with torch."""
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace('''/''', '''.''' )] = v
    current_block = new_current_block
    torch.save(current_block, save_path )
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name = WEIGHTS_NAME ):
    """Split a T5X Switch checkpoint into PyTorch shards no larger than `max_shard_size`."""
    max_shard_size = convert_file_size_to_int(max_shard_size )
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path, exist_ok=True )
    with gfile.GFile(switch_checkpoint_path + '''/checkpoint''', '''rb''' ) as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
        checkpoint_info = flatten_dict(checkpoint_info, sep='''/''' )
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name , split_layer , content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        raw_weights = torch.tensor(raw_weights )
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        key , raw_weights = rename_base_flax_keys(tuple(key.split('''/''' ) ), raw_weights )
        key = '''/'''.join(key )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace('''.bin''', f"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
            rename_and_save_block(current_block, save_path )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch, dtype ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace('''.bin''', f"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
    rename_and_save_block(current_block, save_path )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(sharded_state_dicts ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(
            '''.bin''', f"""-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin""" )
        temp_filename = os.path.join(dump_path, weights_name.replace('''.bin''', f"""-{idx+1:05d}-of-???.bin""" ) )
        os.rename(temp_filename, os.path.join(dump_path, shard_file ) )
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'''total_size''': total_size}
    index = {'''metadata''': metadata, '''weight_map''': weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME ), '''w''', encoding='''utf-8''' ) as f:
        content = json.dumps(index, indent=2, sort_keys=True ) + '''\n'''
        f.write(content )
    return metadata, index
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
__a = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    """Quick generation check on a converted small checkpoint; the hard-coded paths are the script author's."""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
    config = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' )
    config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        '''/home/arthur_huggingface_co/transformers/switch_converted''', device_map='''auto''' )
    tokenizer = T5Tokenizer.from_pretrained('''t5-small''' )
    text = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
    input_ids = tokenizer(text, return_tensors='''pt''' ).input_ids
    out = model.generate(input_ids, decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
| 337 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key ):
    """Mark the function with a key code so it can be handled in the register."""
    def decorator(func ):
        handle = getattr(func, '''handle_key''', [] )
        handle += [key]
        setattr(func, '''handle_key''', handle )
        return func
    return decorator
def mark_multiple(*keys ):
    """Mark the function with several key codes so it can be handled in the register."""
    def decorator(func ):
        handle = getattr(func, '''handle_key''', [] )
        handle += keys
        setattr(func, '''handle_key''', handle )
        return func
    return decorator
class KeyHandler( type ):
    def __new__( cls , name , bases , attrs ):
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , '''key_handler''' ):
            setattr(new_cls , '''key_handler''' , {} )
        setattr(new_cls , '''handle_input''' , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , '''handle_key''' , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register( cls ):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
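# --- Example (illustrative sketch) ---
# Wiring a class through the decorators and metaclass above; handle_input blocks
# on get_character(), so this only makes sense in an interactive terminal.
if __name__ == "__main__":
    @register
    class DemoMenu:
        @mark(ord("j"))
        def move_down(cls):
            print("down")
    DemoMenu.handle_input(DemoMenu)  # pressing 'j' triggers the marked handler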
| 337 | 1 |
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def __lowercase ( _UpperCamelCase = "laptop" ) ->DataFrame:
"""simple docstring"""
    url = f"""https://www.amazon.in/laptop/s?k={product}"""
    header = {
        '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
        '''Accept-Language''': '''en-US, en;q=0.5''',
    }
    soup = BeautifulSoup(requests.get(url, headers=header ).text )
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''', attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''}, ), soup.find_all('''div''', attrs={'''class''': '''a-row a-size-base a-color-base'''} ), ):
try:
            product_title = item.h2.text
            product_link = '''https://www.amazon.in/''' + item.h2.a['''href''']
            product_price = item.find('''span''', attrs={'''class''': '''a-offscreen'''} ).text
            try:
                product_rating = item.find('''span''', attrs={'''class''': '''a-icon-alt'''} ).text
            except AttributeError:
                product_rating = '''Not available'''
            try:
                product_mrp = (
                    '''₹'''
                    + item.find(
                        '''span''', attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
                )
            except AttributeError:
                product_mrp = ''''''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('''₹''' ).replace(''',''', '''''' ) )
                            - float(product_price.strip('''₹''' ).replace(''',''', '''''' ) )
                        )
                        / float(product_mrp.strip('''₹''' ).replace(''',''', '''''' ) )
                    )
                    * 100 )
            except ValueError:
                discount = float('''nan''' )
except AttributeError:
pass
        data_frame.loc[data_frame.index] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        product_price = ''' '''
        product_mrp = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
__a = '''headphones'''
get_amazon_product_data(product).to_csv(F'''Amazon Product Data for {product}.csv''')
| 337 |
import logging
import os
from .state import PartialState
class __SCREAMING_SNAKE_CASE ( logging.LoggerAdapter ):
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ ):
lowercase : List[Any] = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
lowercase : List[str] = kwargs.pop('''main_process_only''' , SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = kwargs.pop('''in_order''' , SCREAMING_SNAKE_CASE__ )
if self.isEnabledFor(SCREAMING_SNAKE_CASE__ ):
if self._should_log(SCREAMING_SNAKE_CASE__ ):
lowercase , lowercase : str = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
elif in_order:
lowercase : List[Any] = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
lowercase , lowercase : Union[str, Any] = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
state.wait_for_everyone()
def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->List[Any]:
"""simple docstring"""
if log_level is None:
lowercase : str = os.environ.get('''ACCELERATE_LOG_LEVEL''', _UpperCamelCase )
lowercase : str = logging.getLogger(_UpperCamelCase )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(_UpperCamelCase, {} )
| 337 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Any = 'yolos'
def __init__( self , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=[512, 864] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=100 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = hidden_size
lowercase : int = num_hidden_layers
lowercase : str = num_attention_heads
lowercase : str = intermediate_size
lowercase : Dict = hidden_act
lowercase : int = hidden_dropout_prob
lowercase : Optional[Any] = attention_probs_dropout_prob
lowercase : List[Any] = initializer_range
lowercase : Optional[int] = layer_norm_eps
lowercase : str = image_size
lowercase : Dict = patch_size
lowercase : str = num_channels
lowercase : Optional[int] = qkv_bias
lowercase : List[str] = num_detection_tokens
lowercase : List[str] = use_mid_position_embeddings
lowercase : Dict = auxiliary_loss
# Hungarian matcher
lowercase : Optional[Any] = class_cost
lowercase : Any = bbox_cost
lowercase : int = giou_cost
# Loss coefficients
lowercase : Dict = bbox_loss_coefficient
lowercase : Optional[Any] = giou_loss_coefficient
lowercase : Tuple = eos_coefficient
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = version.parse('1.11' )
@property
def __lowerCamelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCamelCase ( self ):
return 1E-4
@property
def __lowerCamelCase ( self ):
return 12
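# --- Example (illustrative sketch) ---
# Building the equivalent configuration through the public transformers API:
if __name__ == "__main__":
    from transformers import YolosConfig
    config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
    print(config.hidden_size, config.num_detection_tokens)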
| 337 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel( pl.LightningModule ):
    def __init__( self , model ):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )
def __lowerCamelCase ( self ):
pass
def convert_longformer_qa_checkpoint_to_pytorch(longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path ):
    """Transfer weights from a Lightning QA checkpoint into a LongformerForQuestionAnswering model."""
    longformer = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(longformer )
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device('''cpu''' ) )
    lightning_model.load_state_dict(ckpt['''state_dict'''] )
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
    print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__a = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 337 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_deit'''] = ['''DeiTFeatureExtractor''']
    _import_structure['''image_processing_deit'''] = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_deit'''] = [
        '''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''DeiTForImageClassification''',
        '''DeiTForImageClassificationWithTeacher''',
        '''DeiTForMaskedImageModeling''',
        '''DeiTModel''',
        '''DeiTPreTrainedModel''',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_deit'''] = [
        '''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFDeiTForImageClassification''',
        '''TFDeiTForImageClassificationWithTeacher''',
        '''TFDeiTForMaskedImageModeling''',
        '''TFDeiTModel''',
        '''TFDeiTPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 337 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Any = 'yolos'
def __init__( self , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=[512, 864] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=100 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = hidden_size
lowercase : int = num_hidden_layers
lowercase : str = num_attention_heads
lowercase : str = intermediate_size
lowercase : Dict = hidden_act
lowercase : int = hidden_dropout_prob
lowercase : Optional[Any] = attention_probs_dropout_prob
lowercase : List[Any] = initializer_range
lowercase : Optional[int] = layer_norm_eps
lowercase : str = image_size
lowercase : Dict = patch_size
lowercase : str = num_channels
lowercase : Optional[int] = qkv_bias
lowercase : List[str] = num_detection_tokens
lowercase : List[str] = use_mid_position_embeddings
lowercase : Dict = auxiliary_loss
# Hungarian matcher
lowercase : Optional[Any] = class_cost
lowercase : Any = bbox_cost
lowercase : int = giou_cost
# Loss coefficients
lowercase : Dict = bbox_loss_coefficient
lowercase : Optional[Any] = giou_loss_coefficient
lowercase : Tuple = eos_coefficient
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = version.parse('1.11' )
@property
def __lowerCamelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCamelCase ( self ):
return 1E-4
@property
def __lowerCamelCase ( self ):
return 12
| 337 | 1 |
def multiplicative_persistence(num ) ->int:
    """Count how many times the digits must be multiplied together before one digit remains."""
    if not isinstance(num, int ):
        raise ValueError('''multiplicative_persistence() only accepts integral values''' )
    if num < 0:
        raise ValueError('''multiplicative_persistence() does not accept negative values''' )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 1
        for i in range(0, len(numbers ) ):
            total *= numbers[i]
        num_string = str(total )
        steps += 1
    return steps
def additive_persistence(num ) ->int:
    """Count how many times the digits must be summed before one digit remains."""
    if not isinstance(num, int ):
        raise ValueError('''additive_persistence() only accepts integral values''' )
    if num < 0:
        raise ValueError('''additive_persistence() does not accept negative values''' )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 0
        for i in range(0, len(numbers ) ):
            total += numbers[i]
        num_string = str(total )
        steps += 1
    return steps
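# Worked examples: 217 -> 2*1*7 = 14 -> 1*4 = 4, so multiplicative persistence 2;
# 199 -> 1+9+9 = 19 -> 1+9 = 10 -> 1+0 = 1, so additive persistence 3.
assert multiplicative_persistence(217) == 2
assert additive_persistence(199) == 3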
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint ) ->None:
    """Compare installed and wanted versions with the requested operator, raising on failure."""
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
            f""" reinstalling {pkg}.""" )
    if not ops[op](version.parse(got_ver ), version.parse(want_ver ) ):
        raise ImportError(
            f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def require_version(requirement, hint = None ) ->None:
    """Perform a runtime check of a dependency version, using pip's requirement syntax."""
    hint = f"""\n{hint}""" if hint is not None else ''''''
    # non-versioned check
    if re.match(R'''^[\w_\-\d]+$''', requirement ):
        pkg , op , want_ver = requirement, None, None
    else:
        match = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''', requirement )
        if not match:
            raise ValueError(
                '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'''
                f""" got {requirement}""" )
        pkg , want_full = match[0]
        want_range = want_full.split(''',''' ) # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(R'''^([\s!=<>]{1,2})(.+)''', w )
            if not match:
                raise ValueError(
                    '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'''
                    f""" but got {requirement}""" )
            op , want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
    # special case
    if pkg == "python":
        got_ver = '''.'''.join([str(x ) for x in sys.version_info[:3]] )
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint )
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg )
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint )
def require_version_core(requirement ):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
    return require_version(requirement, hint )
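# --- Example (illustrative usage) ---
# A bare name only checks that the package is installed; operators add version
# checks, and comma-separated clauses must all hold:
#   require_version("numpy")
#   require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")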
| 337 | 1 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput( BaseOutput ):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 337 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__a = logging.get_logger(__name__)
__a = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = 'deta'
A : Dict = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=900 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=1024 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="relu" , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__="sine" , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=300 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.25 , **SCREAMING_SNAKE_CASE__ , ):
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowercase : Tuple = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
else:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = backbone_config.pop('''model_type''' )
lowercase : Any = CONFIG_MAPPING[backbone_model_type]
lowercase : List[Any] = config_class.from_dict(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = backbone_config
lowercase : Union[str, Any] = num_queries
lowercase : Any = max_position_embeddings
lowercase : int = d_model
lowercase : Any = encoder_ffn_dim
lowercase : Optional[int] = encoder_layers
lowercase : Tuple = encoder_attention_heads
lowercase : Optional[Any] = decoder_ffn_dim
lowercase : Optional[int] = decoder_layers
lowercase : int = decoder_attention_heads
lowercase : Any = dropout
lowercase : int = attention_dropout
lowercase : Dict = activation_dropout
lowercase : int = activation_function
lowercase : Dict = init_std
lowercase : List[str] = init_xavier_std
lowercase : Optional[Any] = encoder_layerdrop
lowercase : Tuple = auxiliary_loss
lowercase : Tuple = position_embedding_type
# deformable attributes
lowercase : List[str] = num_feature_levels
lowercase : Tuple = encoder_n_points
lowercase : Optional[int] = decoder_n_points
lowercase : Tuple = two_stage
lowercase : Optional[Any] = two_stage_num_proposals
lowercase : Union[str, Any] = with_box_refine
lowercase : Any = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
lowercase : Optional[Any] = class_cost
lowercase : str = bbox_cost
lowercase : List[Any] = giou_cost
# Loss coefficients
lowercase : Tuple = mask_loss_coefficient
lowercase : Any = dice_loss_coefficient
lowercase : Dict = bbox_loss_coefficient
lowercase : Tuple = giou_loss_coefficient
lowercase : Union[str, Any] = eos_coefficient
lowercase : Tuple = focal_alpha
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def __lowerCamelCase ( self ):
return self.encoder_attention_heads
@property
def __lowerCamelCase ( self ):
return self.d_model
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase : Any = self.backbone_config.to_dict()
lowercase : List[str] = self.__class__.model_type
return output
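# --- Example (illustrative sketch) ---
# Instantiating the configuration through the public transformers API; the
# attribute names mirror the constructor arguments above.
if __name__ == "__main__":
    from transformers import DetaConfig
    config = DetaConfig(two_stage=True, with_box_refine=True)
    print(config.num_queries, config.d_model)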
| 337 | 1 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def parse_args():
    """Parse the command line arguments for the image generation script."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''-m''', '''--pretrained_model_name_or_path''', type=str, default=None, required=True, help='''Path to pretrained model or model identifier from huggingface.co/models.''', )
    parser.add_argument(
        '''-c''', '''--caption''', type=str, default='''robotic cat with wings''', help='''Text used to generate images.''', )
    parser.add_argument(
        '''-n''', '''--images_num''', type=int, default=4, help='''How much images to generate.''', )
    parser.add_argument(
        '''-s''', '''--seed''', type=int, default=42, help='''Seed for random process.''', )
    parser.add_argument(
        '''-ci''', '''--cuda_id''', type=int, default=0, help='''cuda_id.''', )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols ):
    """Paste `rows * cols` PIL images into a single grid image."""
    if not len(imgs ) == rows * cols:
        raise ValueError('''The specified number of rows and columns are not correct.''' )
    w , h = imgs[0].size
    grid = Image.new('''RGB''', size=(cols * w, rows * h) )
    grid_w , grid_h = grid.size
    for i, img in enumerate(imgs ):
        grid.paste(img, box=(i % cols * w, i // cols * h) )
    return grid
def generate_images(pipeline, prompt='''robotic cat with wings''', guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42, ):
    """Run the pipeline with a fixed seed and return a (grid, images) pair."""
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt, ).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows )
    return grid, images
__a = parse_args()
# Load models and create wrapper for stable diffusion
__a = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
__a = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
__a = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
__a = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
__a = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
__a = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
__a = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, '''unet''', unet)
else:
__a = unet.to(torch.device('''cuda''', args.cuda_id))
__a = pipeline.to(unet.device)
__a , __a = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
__a = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
| 337 |
def solution() ->str:
    """Return the last ten digits of 1^1 + 2^2 + ... + 1000^1000 (Project Euler 48)."""
    total = 0
    for i in range(1, 1001 ):
        total += i**i
    return str(total )[-10:]
if __name__ == "__main__":
print(solution())
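# A variant that never materializes the huge powers: keep only the last ten
# digits at each step via three-argument pow (modular exponentiation).
def solution_mod() ->str:
    total = 0
    for i in range(1, 1001):
        total = (total + pow(i, i, 10**10)) % 10**10
    return str(total).zfill(10)  # both variants yield '9110846700'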
| 337 | 1 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__a = '''hf-internal-testing/tiny-random-bert'''
__a = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
__a = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self ):
lowercase : Tuple = cached_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(SCREAMING_SNAKE_CASE__ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , '''refs''' , '''main''' ) ) as f:
lowercase : int = f.read()
self.assertEqual(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , '''snapshots''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(os.path.isfile(SCREAMING_SNAKE_CASE__ ) )
# File is cached at the same place the second time.
lowercase : Tuple = cached_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Using a specific revision to test the full commit hash.
lowercase : Union[str, Any] = cached_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , revision='''9b8c223''' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , '''snapshots''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
def __lowerCamelCase ( self ):
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ , '''is not a valid model identifier''' ):
lowercase : Optional[Any] = cached_file('''tiny-random-bert''' , SCREAMING_SNAKE_CASE__ )
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ , '''is not a valid git identifier''' ):
lowercase : Dict = cached_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , revision='''aaaa''' )
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ , '''does not appear to have a file named''' ):
lowercase : List[Any] = cached_file(SCREAMING_SNAKE_CASE__ , '''conf''' )
def __lowerCamelCase ( self ):
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ , '''does not appear to have a file named''' ):
lowercase : int = cached_file(SCREAMING_SNAKE_CASE__ , '''conf''' )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , '''refs''' , '''main''' ) ) as f:
lowercase : Tuple = f.read()
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , '''.no_exist''' , SCREAMING_SNAKE_CASE__ , '''conf''' ) ) )
lowercase : Any = cached_file(SCREAMING_SNAKE_CASE__ , '''conf''' , _raise_exceptions_for_missing_entries=SCREAMING_SNAKE_CASE__ )
self.assertIsNone(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = cached_file(SCREAMING_SNAKE_CASE__ , '''conf''' , local_files_only=SCREAMING_SNAKE_CASE__ , _raise_exceptions_for_missing_entries=SCREAMING_SNAKE_CASE__ )
self.assertIsNone(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = mock.Mock()
lowercase : str = 500
lowercase : str = {}
lowercase : Dict = HTTPError
lowercase : Union[str, Any] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=SCREAMING_SNAKE_CASE__ ) as mock_head:
lowercase : Optional[int] = cached_file(SCREAMING_SNAKE_CASE__ , '''conf''' , _raise_exceptions_for_connection_errors=SCREAMING_SNAKE_CASE__ )
self.assertIsNone(SCREAMING_SNAKE_CASE__ )
# This check we did call the fake head request
mock_head.assert_called()
def __lowerCamelCase ( self ):
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , SCREAMING_SNAKE_CASE__ ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , SCREAMING_SNAKE_CASE__ ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , SCREAMING_SNAKE_CASE__ ) )
def __lowerCamelCase ( self ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , SCREAMING_SNAKE_CASE__ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , SCREAMING_SNAKE_CASE__ , revision='''ahaha''' )
lowercase : List[Any] = get_file_from_repo('''bert-base-cased''' , SCREAMING_SNAKE_CASE__ )
# The name is the cached name which is not very easy to test, so instead we load the content.
lowercase : Union[str, Any] = json.loads(open(SCREAMING_SNAKE_CASE__ , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 768 )
def __lowerCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase : Optional[Any] = Path(SCREAMING_SNAKE_CASE__ ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(SCREAMING_SNAKE_CASE__ , '''a.txt''' ) , str(SCREAMING_SNAKE_CASE__ ) )
self.assertIsNone(get_file_from_repo(SCREAMING_SNAKE_CASE__ , '''b.txt''' ) )
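# --- Illustration (not part of the test class above) ---
# A self-contained sketch of the 500-error mocking pattern used above: patch
# ``requests`` so every call returns a fake failing response, then check that
# the caller degrades to ``None`` instead of raising. ``fetch_optional_file``
# is a hypothetical stand-in for
# ``cached_file(..., _raise_exceptions_for_connection_errors=False)``.
from unittest import mock

from requests.exceptions import HTTPError


def fetch_optional_file(url):
    import requests

    try:
        response = requests.Session().request("GET", url)
        response.raise_for_status()
        return response.content
    except HTTPError:
        return None  # mirrors _raise_exceptions_for_connection_errors=False


response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = HTTPError()
response_mock.json.return_value = {}

with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
    assert fetch_optional_file("https://huggingface.co/some/repo/config.json") is None
    mock_head.assert_called()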
| 337 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__a = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self ):
lowercase : str = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
lowercase : Any = self.diffusers_dir
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE__ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def __lowerCamelCase ( self ):
lowercase : List[Any] = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
lowercase : Tuple = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowercase : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
lowercase : Any = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 )
lowercase : List[Any] = black.format_str(SCREAMING_SNAKE_CASE__ , mode=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' , newline='''\n''' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(SCREAMING_SNAKE_CASE__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f:
self.assertTrue(f.read() , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Tuple = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , )
# Copy consistency with a really long name
lowercase : List[Any] = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , overwrite_result=re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , )
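# --- Illustration ---
# A toy version of the "# Copied from" contract exercised above: a copy is
# consistent when the candidate block equals the reference after applying the
# optional ``Old->New`` rename from the marker comment. This is a sketch of
# the idea only, not the real ``check_copies.is_copy_consistent``.
import re


def is_toy_copy_consistent(marker: str, reference: str, candidate: str) -> bool:
    rename = re.search(r"with (\w+)->(\w+)", marker)
    expected = re.sub(*rename.groups(), reference) if rename else reference
    return candidate.strip() == expected.strip()


assert is_toy_copy_consistent(
    "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
    "class DDPMSchedulerOutput:\n    pass",
    "class TestSchedulerOutput:\n    pass",
)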
| 337 | 1 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
__a = logging.get_logger(__name__)
__a = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
__a = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
__a = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
__a = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
__a = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
__a = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
__a = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
__a = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
__a = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
__a = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
__a = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
__a = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
__a = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
__a = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
__a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
__a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
__a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
__a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
__a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
__a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
__a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
__a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
__a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
__a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
__a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
__a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
__a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
__a = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
A : Union[str, Any] = FLAX_MODEL_MAPPING
__a = auto_class_update(FlaxAutoModel)
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
A : List[str] = FLAX_MODEL_FOR_PRETRAINING_MAPPING
__a = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
A : Dict = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
__a = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
A : Dict = FLAX_MODEL_FOR_MASKED_LM_MAPPING
__a = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
A : Optional[int] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__a = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
A : Optional[int] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__a = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
A : Union[str, Any] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
__a = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
A : Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__a = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
A : Dict = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
__a = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
A : Optional[int] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
__a = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
A : Optional[Any] = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__a = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='''image classification'''
)
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
A : Union[str, Any] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
__a = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''')
class __SCREAMING_SNAKE_CASE ( _BaseAutoModelClass ):
A : Any = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
__a = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
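# --- Illustration ---
# The OrderedDicts above only store class *names*; ``_LazyAutoMapping`` does
# the real work by importing a class the first time its model type is looked
# up. A minimal sketch of that idea (the real class also resolves config
# classes via CONFIG_MAPPING_NAMES and handles many edge cases):
import importlib


class ToyLazyMapping:
    def __init__(self, model_mapping_names, package="transformers"):
        self._names = dict(model_mapping_names)  # model_type -> class name
        self._cache = {}
        self._package = package

    def __getitem__(self, model_type):
        if model_type not in self._cache:
            module = importlib.import_module(self._package)
            self._cache[model_type] = getattr(module, self._names[model_type])
        return self._cache[model_type]


# ToyLazyMapping(FLAX_MODEL_MAPPING_NAMES)["bert"] would import FlaxBertModel
# only on this first access, keeping `import transformers` itself cheap.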
| 337 |
import math
class Graph:
    def __init__(self, n=0):  # a graph with nodes 0, 1, ..., n-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j
        for i in range(0, n):
            self.dp[i][i] = 0  # a node is at distance 0 from itself

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        # classic O(n^3) relaxation: allow intermediate nodes 0..k
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
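# --- Illustration (standalone) ---
# The class above only returns distances. A common extension, sketched here,
# also records a ``nxt`` matrix during relaxation so the shortest path itself
# can be reconstructed. ``dp`` is a distance matrix built like ``self.dp``.
def floyd_warshall_with_paths(dp):
    n = len(dp)
    nxt = [[j if dp[i][j] != math.inf else None for j in range(n)] for i in range(n)]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if dp[i][k] + dp[k][j] < dp[i][j]:
                    dp[i][j] = dp[i][k] + dp[k][j]
                    nxt[i][j] = nxt[i][k]
    return dp, nxt


def reconstruct_path(nxt, u, v):
    if nxt[u][v] is None:
        return []  # v is unreachable from u
    path = [u]
    while u != v:
        u = nxt[u][v]
        path.append(u)
    return path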
| 337 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING = None
class __SCREAMING_SNAKE_CASE ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ ):
super().__init__(features=SCREAMING_SNAKE_CASE__ )
import jax
from jaxlib.xla_client import Device
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
raise ValueError(
f"""Expected {device} to be a `str` not {type(SCREAMING_SNAKE_CASE__ )}, as `jaxlib.xla_extension.Device` """
'''is not serializable neither with `pickle` nor with `dill`. Instead you can surround '''
'''the device with `str()` to get its string identifier that will be internally mapped '''
'''to the actual `jaxlib.xla_extension.Device`.''' )
lowercase : int = device if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowercase : List[Any] = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f"""Device with string identifier {self.device} not listed among the available """
f"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
f"""device: {str(jax.devices()[0] )}.""" )
lowercase : Any = str(jax.devices()[0] )
lowercase : Union[str, Any] = jnp_array_kwargs
@staticmethod
def __lowerCamelCase ( ):
import jax
return {str(SCREAMING_SNAKE_CASE__ ): device for device in jax.devices()}
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
import jax
import jax.numpy as jnp
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and column:
if all(
isinstance(SCREAMING_SNAKE_CASE__ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(SCREAMING_SNAKE_CASE__ , axis=0 )
return column
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
import jax
import jax.numpy as jnp
if isinstance(SCREAMING_SNAKE_CASE__ , (str, bytes, type(SCREAMING_SNAKE_CASE__ )) ):
return value
elif isinstance(SCREAMING_SNAKE_CASE__ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
lowercase : int = {}
if isinstance(SCREAMING_SNAKE_CASE__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_x64:
lowercase : str = {'''dtype''': jnp.int64}
else:
lowercase : Optional[int] = {'''dtype''': jnp.int32}
elif isinstance(SCREAMING_SNAKE_CASE__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
lowercase : Optional[int] = {'''dtype''': jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(SCREAMING_SNAKE_CASE__ , PIL.Image.Image ):
lowercase : int = np.asarray(SCREAMING_SNAKE_CASE__ )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowercase : List[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(SCREAMING_SNAKE_CASE__ , **{**default_dtype, **self.jnp_array_kwargs} )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(SCREAMING_SNAKE_CASE__ , '''__array__''' ) and not isinstance(SCREAMING_SNAKE_CASE__ , jax.Array ):
lowercase : List[Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE__ ) for substruct in data_struct] )
elif isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE__ ) for substruct in data_struct] )
return self._tensorize(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return map_nested(self._recursive_tensorize , SCREAMING_SNAKE_CASE__ , map_list=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : str = self.numpy_arrow_extractor().extract_row(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = self.python_features_decoder.decode_row(SCREAMING_SNAKE_CASE__ )
return self.recursive_tensorize(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = self.numpy_arrow_extractor().extract_column(SCREAMING_SNAKE_CASE__ )
lowercase : int = self.python_features_decoder.decode_column(SCREAMING_SNAKE_CASE__ , pa_table.column_names[0] )
lowercase : str = self.recursive_tensorize(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = self._consolidate(SCREAMING_SNAKE_CASE__ )
return column
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = self.numpy_arrow_extractor().extract_batch(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = self.python_features_decoder.decode_batch(SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = self.recursive_tensorize(SCREAMING_SNAKE_CASE__ )
for column_name in batch:
lowercase : Any = self._consolidate(batch[column_name] )
return batch
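# --- Illustration ---
# Why the formatter above keys devices by string: ``jaxlib`` Device objects
# cannot be pickled, so only their identifiers are stored and mapped back on
# use. A standalone sketch of that mapping plus the x64-dependent integer
# default from ``_tensorize`` (requires jax to be installed):
import jax
import jax.numpy as jnp

device_mapping = {str(device): device for device in jax.devices()}
default_int = jnp.int64 if jax.config.jax_enable_x64 else jnp.int32

with jax.default_device(device_mapping[str(jax.devices()[0])]):
    x = jnp.array([1, 2, 3], dtype=default_int)
print(x.dtype)  # int32 unless JAX was started with x64 enabled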
| 337 |
from __future__ import annotations
def __lowercase ( _UpperCamelCase ) ->float:
    """
    Return the arithmetic mean of the given numbers.

    >>> __lowercase([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not _UpperCamelCase:
        raise ValueError('''List is empty''' )
    return sum(_UpperCamelCase ) / len(_UpperCamelCase )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 337 | 1 |
from __future__ import annotations
def __lowercase ( nums ) ->int:
    """
    Return the maximum sum over elements of ``nums`` with no two chosen
    elements adjacent.

    >>> __lowercase([1, 2, 4, 5])
    7
    >>> __lowercase([])
    0
    """
    if not nums:
        return 0
    # max_including: best sum that ends by taking the current element
    # max_excluding: best sum that skips the current element
    max_including, max_excluding = nums[0], 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
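# --- Illustration ---
# The same recurrence written top-down, which makes the two states explicit:
# best(i) is the larger of "skip nums[i]" and "take nums[i], so skip i - 1".
from functools import lru_cache


def max_non_adjacent_sum_recursive(nums: list) -> int:
    @lru_cache(maxsize=None)
    def best(i: int) -> int:
        if i < 0:
            return 0
        return max(best(i - 1), best(i - 2) + nums[i])

    return best(len(nums) - 1) if nums else 0


assert max_non_adjacent_sum_recursive([1, 2, 4, 5]) == 7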
| 337 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__a = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
warnings.warn(
'''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DeiTImageProcessor instead.''' , SCREAMING_SNAKE_CASE__ , )
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
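# --- Illustration ---
# The generic deprecation pattern used above: a thin subclass that emits a
# FutureWarning and otherwise defers entirely to its replacement.
# ``NewProcessor`` / ``OldProcessor`` are hypothetical names for this sketch.
import warnings


class NewProcessor:
    def __init__(self, size=224):
        self.size = size


class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated and will be removed; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)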
| 337 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=99 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=512 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=0 , ):
lowercase : List[Any] = parent
lowercase : str = batch_size
lowercase : Optional[int] = seq_length
lowercase : Union[str, Any] = is_training
lowercase : Optional[Any] = use_input_mask
lowercase : str = use_token_type_ids
lowercase : List[Any] = use_labels
lowercase : List[Any] = vocab_size
lowercase : Optional[Any] = hidden_size
lowercase : Union[str, Any] = num_hidden_layers
lowercase : Optional[Any] = num_attention_heads
lowercase : int = intermediate_size
lowercase : Union[str, Any] = hidden_act
lowercase : Optional[Any] = hidden_dropout_prob
lowercase : Optional[int] = attention_probs_dropout_prob
lowercase : List[Any] = max_position_embeddings
lowercase : Any = type_vocab_size
lowercase : Tuple = type_sequence_label_size
lowercase : int = initializer_range
lowercase : Any = num_labels
lowercase : Optional[int] = num_choices
lowercase : int = scope
lowercase : str = projection_dim
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : List[Any] = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : int = None
if self.use_token_type_ids:
lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : Optional[Any] = None
lowercase : int = None
lowercase : Any = None
if self.use_labels:
lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowercase : str = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , )
lowercase : Optional[int] = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Union[str, Any] = TFDPRContextEncoder(config=SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : str = TFDPRQuestionEncoder(config=SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
lowercase : Any = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
lowercase : int = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = TFDPRReader(config=SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : int = config_and_inputs
lowercase : int = {'''input_ids''': input_ids}
return config, inputs_dict
@require_tf
class __SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
A : Any = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
A : Dict = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
A : str = False
A : Tuple = False
A : Tuple = False
A : List[Any] = False
A : Union[str, Any] = False
def __lowerCamelCase ( self ):
lowercase : Any = TFDPRModelTester(self )
lowercase : Tuple = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*SCREAMING_SNAKE_CASE__ )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : List[str] = TFDPRContextEncoder.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Union[str, Any] = TFDPRContextEncoder.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : str = TFDPRQuestionEncoder.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Tuple = TFDPRReader.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def __lowerCamelCase ( self ):
lowercase : List[Any] = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
lowercase : int = tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
lowercase : Tuple = model(SCREAMING_SNAKE_CASE__ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
lowercase : Optional[Any] = tf.constant(
[
[
0.03236253,
0.12753335,
0.16818509,
0.00279786,
0.3896933,
0.24264945,
0.2178971,
-0.02335227,
-0.08481959,
-0.14324117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
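# --- Illustration ---
# End-to-end use of the encoders tested above: DPR ranks a passage by the
# inner product of question and context embeddings (``pooler_output``).
# Hedged sketch; it downloads the public single-nq checkpoints, and
# ``from_pt=True`` may be needed if only PyTorch weights are published.
import tensorflow as tf
from transformers import (
    DPRContextEncoderTokenizer,
    DPRQuestionEncoderTokenizer,
    TFDPRContextEncoder,
    TFDPRQuestionEncoder,
)

q_name = "facebook/dpr-question_encoder-single-nq-base"
c_name = "facebook/dpr-ctx_encoder-single-nq-base"
q_inputs = DPRQuestionEncoderTokenizer.from_pretrained(q_name)("hello, is my dog cute?", return_tensors="tf")
c_inputs = DPRContextEncoderTokenizer.from_pretrained(c_name)("My dog is very cute.", return_tensors="tf")
q_emb = TFDPRQuestionEncoder.from_pretrained(q_name)(**q_inputs).pooler_output
c_emb = TFDPRContextEncoder.from_pretrained(c_name)(**c_inputs).pooler_output
print(float(tf.squeeze(tf.matmul(q_emb, c_emb, transpose_b=True))))  # higher = more relevant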
| 337 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
__a = logging.get_logger(__name__)
def __lowercase ( _UpperCamelCase ) ->List[int]:
"""simple docstring"""
if isinstance(_UpperCamelCase, np.ndarray ):
return list(_UpperCamelCase.shape )
dynamic = tf.shape(_UpperCamelCase )
if _UpperCamelCase.shape == tf.TensorShape(None ):
return dynamic
static = _UpperCamelCase.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(static )]
def __lowercase ( _UpperCamelCase, _UpperCamelCase = None, _UpperCamelCase = None ) ->tf.Tensor:
"""simple docstring"""
return tf.nn.softmax(logits=logits + 1e-9, axis=_UpperCamelCase, name=_UpperCamelCase )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=1e-5, _UpperCamelCase=-1 ) ->int:
"""simple docstring"""
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_UpperCamelCase, _UpperCamelCase ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
lowercase , lowercase : Union[str, Any] = tf.nn.moments(_UpperCamelCase, axes=[axis], keepdims=_UpperCamelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
lowercase : int = [1] * inputs.shape.rank
lowercase : Union[str, Any] = shape_list(_UpperCamelCase )[axis]
lowercase : List[str] = tf.reshape(_UpperCamelCase, _UpperCamelCase )
lowercase : Dict = tf.reshape(_UpperCamelCase, _UpperCamelCase )
# Compute layer normalization using the batch_normalization
# function.
lowercase : List[str] = tf.nn.batch_normalization(
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, offset=_UpperCamelCase, scale=_UpperCamelCase, variance_epsilon=_UpperCamelCase, )
return outputs
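# --- Illustration ---
# Quick sanity check for the functional layer norm above: with gamma = 1 and
# beta = 0 on the last axis it should match Keras' LayerNormalization. A
# hedged sketch that inlines the same moments/batch_normalization recipe:
import tensorflow as tf

x = tf.random.normal([2, 3, 8])
mean, variance = tf.nn.moments(x, axes=[-1], keepdims=True)
manual = tf.nn.batch_normalization(
    x, mean, variance, offset=tf.zeros([8]), scale=tf.ones([8]), variance_epsilon=1e-5
)
keras_ln = tf.keras.layers.LayerNormalization(axis=-1, epsilon=1e-5)
tf.debugging.assert_near(manual, keras_ln(x), atol=1e-4)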
def __lowercase ( _UpperCamelCase, _UpperCamelCase=0, _UpperCamelCase=-1 ) ->List[Any]:
"""simple docstring"""
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
lowercase : Dict = tf.shape(_UpperCamelCase )
lowercase : Optional[Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
lowercase : List[str] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0 )
return tf.reshape(_UpperCamelCase, _UpperCamelCase )
def __lowercase ( _UpperCamelCase ) ->tf.Tensor:
"""simple docstring"""
if not isinstance(_UpperCamelCase, tf.Tensor ):
lowercase : Optional[Any] = tf.convert_to_tensor(_UpperCamelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
lowercase : Tuple = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
lowercase : List[Any] = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
lowercase : str = (
tf.cast(1, encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
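# --- Illustration ---
# What the inversion above does numerically: a {0, 1} padding mask becomes an
# additive bias where kept positions contribute 0 and padded positions the
# dtype minimum, so they vanish after softmax. A two-token sketch:
import tensorflow as tf

mask = tf.constant([[1.0, 0.0]])  # 1 = attend, 0 = padding
bias = (1.0 - mask[:, None, None, :]) * mask.dtype.min
print(tf.nn.softmax(tf.zeros([1, 1, 1, 2]) + bias))  # -> [[[[1., 0.]]]]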
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = "input_ids" ) ->None:
"""simple docstring"""
tf.debugging.assert_less(
_UpperCamelCase, tf.cast(_UpperCamelCase, dtype=tensor.dtype ), message=(
f"""The maximum value of {tensor_name} ({tf.math.reduce_max(_UpperCamelCase )}) must be smaller than the embedding """
f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
), )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
HDF5_OBJECT_HEADER_LIMIT = 64512  # HDF5 caps object header messages at 64 KiB
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
lowercase : Optional[int] = [x for x in data if len(_UpperCamelCase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
f"""bytes: {bad_attributes}""" )
lowercase : Any = np.asarray(_UpperCamelCase )
lowercase : List[Any] = 1
lowercase : Tuple = np.array_split(_UpperCamelCase, _UpperCamelCase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
lowercase : Dict = np.array_split(_UpperCamelCase, _UpperCamelCase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(_UpperCamelCase ):
lowercase : Optional[int] = chunk_data
else:
lowercase : int = data
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->List[str]:
"""simple docstring"""
if name in group.attrs:
lowercase : str = [n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs[name]]
else:
lowercase : Optional[Any] = []
lowercase : List[str] = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def __lowercase ( _UpperCamelCase ) ->List[str]:
"""simple docstring"""
def _expand_single_ad_tensor(_UpperCamelCase ):
if isinstance(_UpperCamelCase, tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(_UpperCamelCase, axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor, _UpperCamelCase )
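# --- Illustration ---
# Why ``shape_list`` mixes static and dynamic shapes: inside ``tf.function``
# unknown dimensions come back as ``None`` from ``.shape`` but as tensors
# from ``tf.shape``; the helper keeps ints where known and tensors where not.
import tensorflow as tf


@tf.function(input_signature=[tf.TensorSpec([None, 4], tf.float32)])
def describe(x):
    static = x.shape.as_list()  # [None, 4] at trace time
    dynamic = tf.shape(x)  # runtime tensor, e.g. [3, 4]
    mixed = [dynamic[i] if s is None else s for i, s in enumerate(static)]
    return mixed[0]  # batch size as a runtime tensor


print(describe(tf.zeros([3, 4])))  # tf.Tensor(3, ...)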
| 337 | 1 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__a = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __SCREAMING_SNAKE_CASE :
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=14 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=19 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=[1, 2, 3, 4, 5] , SCREAMING_SNAKE_CASE__=25 , SCREAMING_SNAKE_CASE__=5 , ):
lowercase : Tuple = d_model
lowercase : Optional[int] = parent
lowercase : Tuple = batch_size
lowercase : Union[str, Any] = prediction_length
lowercase : Union[str, Any] = context_length
lowercase : List[Any] = cardinality
lowercase : Optional[int] = num_time_features
lowercase : Dict = lags_sequence
lowercase : Any = embedding_dimension
lowercase : Union[str, Any] = is_training
lowercase : Tuple = hidden_size
lowercase : Optional[int] = num_hidden_layers
lowercase : List[str] = num_attention_heads
lowercase : Optional[int] = intermediate_size
lowercase : Any = hidden_act
lowercase : Optional[Any] = hidden_dropout_prob
lowercase : List[str] = attention_probs_dropout_prob
lowercase : Optional[int] = context_length
lowercase : Any = prediction_length + label_length
lowercase : int = label_length
lowercase : Any = moving_average
lowercase : Optional[Any] = autocorrelation_factor
def __lowerCamelCase ( self ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = config.context_length + max(config.lags_sequence )
lowercase : str = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
lowercase : str = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
lowercase : int = floats_tensor([self.batch_size, _past_length] )
lowercase : Tuple = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
lowercase : Optional[int] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
lowercase : Dict = floats_tensor([self.batch_size, config.prediction_length] )
lowercase : Any = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
def __lowerCamelCase ( self ):
lowercase : Tuple = self.get_config()
lowercase : Any = self.prepare_autoformer_inputs_dict(SCREAMING_SNAKE_CASE__ )
return config, inputs_dict
def __lowerCamelCase ( self ):
lowercase , lowercase : Tuple = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = AutoformerModel(config=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ).eval()
lowercase : Optional[Any] = model(**SCREAMING_SNAKE_CASE__ )
lowercase : Dict = outputs.encoder_last_hidden_state
lowercase : Union[str, Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Tuple = model.get_encoder()
encoder.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : str = AutoformerEncoder.from_pretrained(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
lowercase , lowercase , lowercase , lowercase , lowercase : int = model.create_network_inputs(**SCREAMING_SNAKE_CASE__ )
lowercase , lowercase : int = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
lowercase : Dict = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
lowercase : List[str] = encoder(inputs_embeds=SCREAMING_SNAKE_CASE__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
lowercase : Union[str, Any] = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
lowercase : Tuple = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
lowercase : List[str] = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
lowercase : Tuple = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int = model.get_decoder()
decoder.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = AutoformerDecoder.from_pretrained(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = decoder(
trend=SCREAMING_SNAKE_CASE__ , inputs_embeds=SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class __SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
A : Optional[Any] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
A : str = (AutoformerForPrediction,) if is_torch_available() else ()
A : List[Any] = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
A : Optional[int] = False
A : Dict = False
A : Union[str, Any] = False
A : Optional[Any] = False
A : List[str] = False
A : Any = False
def __lowerCamelCase ( self ):
lowercase : str = AutoformerModelTester(self )
lowercase : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
lowercase , lowercase : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowercase : Tuple = model_class(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase , lowercase : Any = model_class.from_pretrained(SCREAMING_SNAKE_CASE__ , output_loading_info=SCREAMING_SNAKE_CASE__ )
self.assertEqual(info['''missing_keys'''] , [] )
def __lowerCamelCase ( self ):
lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason='''Model has no tokens embeddings''' )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
lowercase : Tuple = inspect.signature(getattr(SCREAMING_SNAKE_CASE__ , '''forward''' ) )
# The main input is the name of the argument after `self`
lowercase : Optional[int] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase , lowercase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Optional[int] = model_class(SCREAMING_SNAKE_CASE__ )
lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : List[Any] = [*signature.parameters.keys()]
lowercase : List[Any] = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
self.assertListEqual(arg_names[: len(SCREAMING_SNAKE_CASE__ )] , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase , lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase : List[Any] = True
lowercase : Dict = getattr(self.model_tester , '''seq_length''' , SCREAMING_SNAKE_CASE__ )
lowercase : Any = getattr(self.model_tester , '''decoder_seq_length''' , SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = getattr(self.model_tester , '''encoder_seq_length''' , SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = getattr(self.model_tester , '''d_model''' , SCREAMING_SNAKE_CASE__ )
lowercase : Any = getattr(self.model_tester , '''num_attention_heads''' , SCREAMING_SNAKE_CASE__ )
lowercase : str = d_model // num_attention_heads
for model_class in self.all_model_classes:
lowercase : Optional[Any] = True
lowercase : Tuple = False
lowercase : Optional[int] = True
lowercase : Tuple = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
lowercase : str = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
lowercase : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase : List[Any] = True
lowercase : Dict = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
lowercase : List[str] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
lowercase : Any = outputs.encoder_attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
lowercase : str = len(SCREAMING_SNAKE_CASE__ )
lowercase : int = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# decoder attentions
lowercase : Union[str, Any] = outputs.decoder_attentions
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , (list, tuple) )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
lowercase : Any = outputs.cross_attentions
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , (list, tuple) )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
lowercase : Any = True
lowercase : Dict = True
lowercase : str = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
lowercase : Dict = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(out_len + 2 , len(SCREAMING_SNAKE_CASE__ ) )
lowercase : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def __lowerCamelCase ( self ):
super().test_retain_grad_hidden_states_attentions()
def __lowercase ( _UpperCamelCase="train-batch.pt" ) ->int:
"""simple docstring"""
lowercase : Optional[int] = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''', filename=_UpperCamelCase, repo_type='''dataset''' )
lowercase : Optional[Any] = torch.load(_UpperCamelCase, map_location=_UpperCamelCase )
return batch
@require_torch
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self ):
lowercase : List[str] = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = prepare_batch()
with torch.no_grad():
lowercase : Any = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
lowercase : Any = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=SCREAMING_SNAKE_CASE__ ) )
def __lowerCamelCase ( self ):
lowercase : str = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
lowercase : Optional[int] = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
lowercase : List[str] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=SCREAMING_SNAKE_CASE__ ) )
def __lowerCamelCase ( self ):
lowercase : List[Any] = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
lowercase : Tuple = model.generate(
static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
lowercase : Any = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , SCREAMING_SNAKE_CASE__ )
lowercase : Any = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , SCREAMING_SNAKE_CASE__ , rtol=1E-1 ) )
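# --- Illustration ---
# The generation test above produces ``num_parallel_samples`` sample paths
# per series; a point forecast is the per-step mean over that sample axis,
# exactly as in the final assertion, and quantiles give uncertainty bands.
# Sketch with a dummy tensor standing in for ``outputs.sequences``:
import torch

sequences = torch.randn(64, 100, 24)  # (batch, num_parallel_samples, prediction_length)
mean_prediction = sequences.mean(dim=1)  # (64, 24) point forecast
p10, p90 = sequences.quantile(torch.tensor([0.1, 0.9]), dim=1)  # 80% band per step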
| 337 |
def __lowercase ( _UpperCamelCase = 4000000 ) ->int:
    """
    Sum the even Fibonacci numbers that do not exceed the given limit.

    >>> __lowercase(34)
    44
    """
    even_fibs = []
    a, b = 0, 1
    while b <= _UpperCamelCase:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
    print(F'''{__lowercase() = }''')
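# --- Illustration ---
# Every third Fibonacci number is even, and the even ones satisfy
# E(k) = 4 * E(k - 1) + E(k - 2) with E = 2, 8, 34, ..., so the odd terms can
# be skipped entirely:
def sum_even_fibs_direct(limit: int = 4000000) -> int:
    total = 0
    prev, curr = 0, 2
    while curr <= limit:
        total += curr
        prev, curr = curr, 4 * curr + prev
    return total


assert sum_even_fibs_direct(34) == 44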
| 337 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_yolos'''] = ['''YolosFeatureExtractor''']
    _import_structure['''image_processing_yolos'''] = ['''YolosImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_yolos'''] = [
'''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''YolosForObjectDetection''',
'''YolosModel''',
'''YolosPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 337 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__a = logging.get_logger(__name__)
__a = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = 'perceiver'
    def __init__( self , num_latents=256 , d_latents=1280 , d_model=768 , num_blocks=1 , num_self_attends_per_block=26 , num_self_attention_heads=8 , num_cross_attention_heads=8 , qk_channels=None , v_channels=None , cross_attention_shape_for_attention="kv" , self_attention_widening_factor=1 , cross_attention_widening_factor=1 , hidden_act="gelu" , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_query_residual=True , vocab_size=262 , max_position_embeddings=2048 , image_size=56 , train_size=[368, 496] , num_frames=16 , audio_samples_per_frame=1920 , samples_per_patch=16 , output_shape=[1, 16, 224, 224] , **kwargs , ):
        super().__init__(**kwargs )
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class __SCREAMING_SNAKE_CASE ( A__ ):
@property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''inputs''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
@property
    def atol_for_validation( self ):
return 1E-4
    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , num_choices = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 40 , image_height = 40 , ):
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor , PreTrainedTokenizerBase ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair )
            seq_length = compute_effective_axis_dimension(
                seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [''' '''.join(['''a'''] ) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input , return_tensors=framework ) )
            inputs['''inputs'''] = inputs.pop('''input_ids''' )
            return inputs
        elif isinstance(preprocessor , FeatureExtractionMixin ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size , fixed_dimension=OnnxConfig.default_fixed_batch )
            dummy_input = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
            inputs = dict(preprocessor(images=dummy_input , return_tensors=framework ) )
            inputs['''inputs'''] = inputs.pop('''pixel_values''' )
            return inputs
else:
raise ValueError(
'''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
| 337 | 1 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Tuple = 'umt5'
A : List[Any] = ['past_key_values']
    def __init__( self , vocab_size=250112 , d_model=512 , d_kv=64 , d_ff=1024 , num_layers=8 , num_decoder_layers=None , num_heads=6 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , feed_forward_proj="gated-gelu" , is_encoder_decoder=True , use_cache=True , tokenizer_class="T5Tokenizer" , tie_word_embeddings=True , pad_token_id=0 , eos_token_id=1 , decoder_start_token_id=0 , **kwargs , ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder , tokenizer_class=tokenizer_class , tie_word_embeddings=tie_word_embeddings , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split('''-''' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == '''gated'''
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
                '''\'gated-gelu\' or \'relu\'''' )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = '''gelu_new'''
    @property
    def hidden_size( self ):
        return self.d_model
    @property
    def num_attention_heads( self ):
        return self.num_heads
    @property
    def num_hidden_layers( self ):
        return self.num_layers
class __SCREAMING_SNAKE_CASE ( A__ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs( self ):
        common_inputs = {
            '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
            '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
        }
        if self.use_past:
            common_inputs['''attention_mask'''][1] = '''past_encoder_sequence + sequence'''
            common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
            common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        else:
            common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
        return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset( self ):
return 13
@property
    def atol_for_validation( self ):
return 5E-4
| 337 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator ( length = 8 ) ->str:
    """simple docstring"""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars ) for _ in range(length ) )
def alternative_password_generator ( chars_incl, i ) ->str:
    """simple docstring"""
    i -= len(chars_incl )
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder )
        + random(digits, quotient )
        + random(punctuation, quotient )
    )
    list_of_chars = list(chars )
    shuffle(list_of_chars )
    return "".join(list_of_chars )
# random is a generalised function for letters, characters and numbers
def random ( ctbi, i ) ->str:
    """simple docstring"""
    return "".join(secrets.choice(ctbi ) for _ in range(i ) )
def random_number ( ctbi, i ) ->str:
    """simple docstring"""
    # Minimal completion of a stub the original leaves as '''Put your code here...''';
    # restricting the pool to digits is an assumption based on the helper's name.
    return "".join(secrets.choice(digits ) for _ in range(i ) )
def random_letters ( ctbi, i ) ->str:
    """simple docstring"""
    # Minimal completion (same assumption as above): draw only from letters.
    return "".join(secrets.choice(ascii_letters ) for _ in range(i ) )
def random_characters ( ctbi, i ) ->str:
    """simple docstring"""
    # Minimal completion (same assumption as above): draw only from punctuation.
    return "".join(secrets.choice(punctuation ) for _ in range(i ) )
def is_strong_password ( password, min_length = 8 ) ->bool:
    """simple docstring"""
    if len(password ) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password )
    lower = any(char in ascii_lowercase for char in password )
    num = any(char in digits for char in password )
    spec_char = any(char in punctuation for char in password )
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
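# Illustrative examples (added for clarity): "Hwea7$2!" satisfies all four
# character classes and the default minimum length, so it counts as strong;
# "hwea7$2!" fails only because it lacks an uppercase letter.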
def main ( ) ->None:
    """simple docstring"""
    max_length = int(input('''Please indicate the max length of your password: ''' ).strip() )
    chars_incl = input(
        '''Please indicate the characters that must be in your password: ''' ).strip()
    print('''Password generated:''', password_generator(max_length ) )
    print(
        '''Alternative Password generated:''', alternative_password_generator(chars_incl, max_length ), )
    print('''[If you are thinking of using this password, You better save it.]''' )
if __name__ == "__main__":
main()
| 337 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_blip'''] = ['''BlipImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_blip'''] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_blip'''] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 337 |
from __future__ import annotations
solution = []
def is_safe ( board, row, column ) ->bool:
    """simple docstring"""
    for i in range(len(board ) ):
        if board[row][i] == 1:
            return False
    for i in range(len(board ) ):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1 ), range(column, -1, -1 ) ):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1 ), range(column, len(board ) ) ):
        if board[i][j] == 1:
            return False
    return True
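# Note (added for clarity): `solve` places exactly one queen per row from the
# top down, so `is_safe` only needs to inspect the current row, the current
# column and the two upward diagonals; every square below the row is still empty.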
def solve ( board, row ) ->bool:
    """simple docstring"""
    if row >= len(board ):
        solution.append(board )
        printboard(board )
        print()
        return True
    for i in range(len(board ) ):
        if is_safe(board, row, i ):
            board[row][i] = 1
            solve(board, row + 1 )
            board[row][i] = 0
    return False
def printboard ( board ) ->None:
    """simple docstring"""
    for i in range(len(board ) ):
        for j in range(len(board ) ):
            if board[i][j] == 1:
                print('''Q''', end=''' ''' )
            else:
                print('''.''', end=''' ''' )
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
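# Sanity check (added for reference): the 8x8 board is known to have exactly
# 92 distinct solutions, so the count printed above should be 92.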
| 337 | 1 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def _get_uniform_logits( self , batch_size , length ):
        scores = jnp.ones((batch_size, length) ) / length
return scores
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = None
lowercase : str = 20
lowercase : Optional[Any] = self._get_uniform_logits(batch_size=2 , length=SCREAMING_SNAKE_CASE__ )
# tweak scores to not be uniform anymore
lowercase : Any = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowercase : List[Any] = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowercase : List[Any] = jax.nn.softmax(SCREAMING_SNAKE_CASE__ , axis=-1 )
lowercase : int = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase : Any = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowercase : Optional[Any] = jax.nn.softmax(temp_dist_warper_sharper(SCREAMING_SNAKE_CASE__ , scores.copy() , cur_len=SCREAMING_SNAKE_CASE__ ) , axis=-1 )
lowercase : Any = jax.nn.softmax(temp_dist_warper_smoother(SCREAMING_SNAKE_CASE__ , scores.copy() , cur_len=SCREAMING_SNAKE_CASE__ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def __lowerCamelCase ( self ):
lowercase : Tuple = None
lowercase : List[str] = 10
lowercase : List[Any] = 2
# create ramp distribution
lowercase : str = np.broadcast_to(np.arange(SCREAMING_SNAKE_CASE__ )[None, :] , (batch_size, vocab_size) ).copy()
lowercase : str = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowercase : Tuple = FlaxTopKLogitsWarper(3 )
lowercase : Optional[Any] = top_k_warp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowercase : List[str] = 5
lowercase : Tuple = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowercase : Optional[Any] = np.broadcast_to(np.arange(SCREAMING_SNAKE_CASE__ )[None, :] , (batch_size, length) ).copy()
lowercase : List[Any] = top_k_warp_safety_check(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = None
lowercase : int = 10
lowercase : List[Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowercase : str = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowercase : List[str] = FlaxTopPLogitsWarper(0.8 )
lowercase : List[str] = np.exp(top_p_warp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowercase : Any = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
# check edge cases with negative and extreme logits
lowercase : List[str] = np.broadcast_to(np.arange(SCREAMING_SNAKE_CASE__ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowercase : Optional[int] = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowercase : Tuple = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowercase : Tuple = top_p_warp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def __lowerCamelCase ( self ):
lowercase : List[Any] = 20
lowercase : Optional[Any] = 4
lowercase : Tuple = 0
lowercase : List[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=SCREAMING_SNAKE_CASE__ )
# check that min length is applied at length 5
lowercase : Optional[int] = ids_tensor((batch_size, 20) , vocab_size=20 )
lowercase : int = 5
lowercase : Dict = self._get_uniform_logits(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = min_dist_processor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowercase : List[str] = self._get_uniform_logits(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = 15
lowercase : Tuple = min_dist_processor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
self.assertFalse(jnp.isinf(SCREAMING_SNAKE_CASE__ ).any() )
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = 20
lowercase : List[str] = 4
lowercase : Dict = 0
lowercase : str = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=SCREAMING_SNAKE_CASE__ )
# check that all scores are -inf except the bos_token_id score
lowercase : Optional[int] = ids_tensor((batch_size, 1) , vocab_size=20 )
lowercase : Optional[int] = 1
lowercase : Any = self._get_uniform_logits(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = logits_processor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowercase : Dict = 3
lowercase : Tuple = self._get_uniform_logits(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = logits_processor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
self.assertFalse(jnp.isinf(SCREAMING_SNAKE_CASE__ ).any() )
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = 20
lowercase : Any = 4
lowercase : Any = 0
lowercase : List[Any] = 5
lowercase : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowercase : List[Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
lowercase : Tuple = 4
lowercase : List[Any] = self._get_uniform_logits(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : str = logits_processor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowercase : Any = 3
lowercase : List[Any] = self._get_uniform_logits(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : int = logits_processor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
self.assertFalse(jnp.isinf(SCREAMING_SNAKE_CASE__ ).any() )
def __lowerCamelCase ( self ):
lowercase : List[str] = 4
lowercase : int = 10
lowercase : Union[str, Any] = 15
lowercase : List[Any] = 2
lowercase : Union[str, Any] = 1
lowercase : int = 15
# dummy input_ids and scores
lowercase : Tuple = ids_tensor((batch_size, sequence_length) , SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = input_ids.copy()
lowercase : Any = self._get_uniform_logits(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : int = scores.copy()
# instantiate all dist processors
lowercase : List[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase : Union[str, Any] = FlaxTopKLogitsWarper(3 )
lowercase : Optional[Any] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowercase : Dict = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = 10
# no processor list
lowercase : Optional[int] = temp_dist_warp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
lowercase : Any = top_k_warp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
lowercase : Any = top_p_warp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
lowercase : int = min_dist_proc(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = bos_dist_proc(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = eos_dist_proc(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
# with processor list
lowercase : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowercase : Dict = processor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
# scores should be equal
self.assertTrue(jnp.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def __lowerCamelCase ( self ):
lowercase : Tuple = 4
lowercase : Optional[int] = 10
lowercase : int = 15
lowercase : Tuple = 2
lowercase : List[str] = 1
lowercase : Union[str, Any] = 15
# dummy input_ids and scores
lowercase : Dict = ids_tensor((batch_size, sequence_length) , SCREAMING_SNAKE_CASE__ )
lowercase : Any = input_ids.copy()
lowercase : Optional[int] = self._get_uniform_logits(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Dict = scores.copy()
# instantiate all dist processors
lowercase : Any = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase : Dict = FlaxTopKLogitsWarper(3 )
lowercase : str = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowercase : int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=SCREAMING_SNAKE_CASE__ )
lowercase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = FlaxForcedEOSTokenLogitsProcessor(max_length=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = 10
# no processor list
def run_no_processor_list(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = temp_dist_warp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = top_k_warp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = top_p_warp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = min_dist_proc(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = bos_dist_proc(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = eos_dist_proc(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
return scores
# with processor list
def run_processor_list(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowercase : Optional[int] = processor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cur_len=SCREAMING_SNAKE_CASE__ )
return scores
lowercase : Optional[int] = jax.jit(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = jax.jit(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = jitted_run_no_processor_list(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = jitted_run_processor_list(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# scores should be equal
self.assertTrue(jnp.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 337 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_ctrl'''] = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_ctrl'''] = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 337 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list ( shape, scale=1.0, rng=None, name=None ) ->list:
    """simple docstring"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class ASTFeatureExtractionTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=1 , padding_value=0.0 , sampling_rate=16000 , return_attention_mask=True , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
def __lowerCamelCase ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : Dict = ASTFeatureExtractor
    def setUp( self ):
        self.feat_extract_tester = ASTFeatureExtractionTester(self )
    def test_call( self ):
        # Tests that all calls wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1E-3 ) )
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs , padding=True , return_tensors='''np''' ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs , padding=True , return_tensors='''np''' ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feat_extract(speech_inputs , return_tensors='''np''' ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs , return_tensors='''np''' ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
@require_torch
    def test_double_precision_pad( self ):
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
    def _load_datasamples( self , num_samples ):
        from datasets import load_dataset
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
@require_torch
    def test_integration( self ):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
            -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
            -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
            -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech , return_tensors='''pt''' ).input_values
        self.assertEqual(input_values.shape , (1, 1024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , EXPECTED_INPUT_VALUES , atol=1E-4 ) )
| 337 |
from collections.abc import Callable
class __SCREAMING_SNAKE_CASE :
    def __init__( self , key = None ):
        # Stores actual heap items.
        self.arr : list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map : dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x : x)
    def _parent ( self , i ):
        return int((i - 1) / 2 ) if i > 0 else None
    def _left ( self , i ):
        left = int(2 * i + 1 )
        return left if 0 < left < self.size else None
    def _right ( self , i ):
        right = int(2 * i + 2 )
        return right if 0 < right < self.size else None
    def _swap ( self , i , j ):
        self.pos_map[self.arr[i][0]] , self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i] , self.arr[j] = self.arr[j], self.arr[i]
    def _cmp ( self , i , j ):
        return self.arr[i][1] < self.arr[j][1]
    def _get_valid_parent ( self , i ):
        left = self._left(i )
        right = self._right(i )
        valid_parent = i
        if left is not None and not self._cmp(left , valid_parent ):
            valid_parent = left
        if right is not None and not self._cmp(right , valid_parent ):
            valid_parent = right
        return valid_parent
    def _heapify_up ( self , index ):
        parent = self._parent(index )
        while parent is not None and not self._cmp(index , parent ):
            self._swap(index , parent )
            index , parent = parent, self._parent(parent )
    def _heapify_down ( self , index ):
        valid_parent = self._get_valid_parent(index )
        while valid_parent != index:
            self._swap(index , valid_parent )
            index , valid_parent = valid_parent, self._get_valid_parent(valid_parent )
    def update_item ( self , item , item_value ):
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value )]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index )
        self._heapify_down(index )
    def delete_item ( self , item ):
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index )
            self._heapify_down(index )
    def insert_item ( self , item , item_value ):
        arr_len = len(self.arr )
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value )] )
        else:
            self.arr[self.size] = [item, self.key(item_value )]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1 )
    def get_top ( self ):
        return self.arr[0] if self.size else None
    def extract_top ( self ):
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0] )
        return top_item_tuple
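# Usage sketch (illustrative only; the class name is kept as it appears above):
# heap = __SCREAMING_SNAKE_CASE(key=lambda x: -x)  # negating the key yields a max-heap
# heap.insert_item("a", 3)
# heap.insert_item("b", 5)
# heap.get_top()      # ["b", -5], since "b" has the largest original value
# heap.extract_top()  # removes and returns the top entry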
def __lowercase ( ) ->None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 1 |
from __future__ import annotations
def comp_and_swap ( array, index1, index2, direction ) ->None:
    """simple docstring"""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1] , array[index2] = array[index2], array[index1]
def bitonic_merge ( array, low, length, direction ) ->None:
    """simple docstring"""
    if length > 1:
        middle = int(length / 2 )
        for i in range(low, low + middle ):
            comp_and_swap(array, i, i + middle, direction )
        bitonic_merge(array, low, middle, direction )
        bitonic_merge(array, low + middle, middle, direction )
def bitonic_sort ( array, low, length, direction ) ->None:
    """simple docstring"""
    if length > 1:
        middle = int(length / 2 )
        bitonic_sort(array, low, middle, 1 )
        bitonic_sort(array, low + middle, middle, 0 )
        bitonic_merge(array, low, length, direction )
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item.strip()) for item in user_input.split(''',''')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('''\nSorted array in ascending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('''Sorted array in descending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
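# Note (added for clarity): bitonic sort only works correctly when the number
# of elements is a power of two; the repeated halving above assumes this.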
| 337 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __SCREAMING_SNAKE_CASE ( A__ ):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 337 | 1 |
def validate_initial_digits ( credit_card_number ) ->bool:
    """simple docstring"""
    return credit_card_number.startswith(('''34''', '''35''', '''37''', '''4''', '''5''', '''6''') )
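# The accepted prefixes map to the major issuers: 34/37 (American Express),
# 35 (JCB), 4 (Visa), 5 (MasterCard) and 6 (Discover).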
def luhn_validation ( credit_card_number ) ->bool:
    """simple docstring"""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number ) - 2
    for i in range(half_len, -1, -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number ) - 1, -1, -2 ):
        total += int(cc_number[i] )
    return total % 10 == 0
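# Worked example (added for illustration): for "59", the 5 is doubled to 10,
# which the branch above reduces to 0 + 1 = 1; adding the untouched 9 gives a
# total of 10, and 10 % 10 == 0, so "59" passes the Luhn check.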
def validate_credit_card_number ( credit_card_number ) ->bool:
    """simple docstring"""
    error_message = f"""{credit_card_number} is an invalid credit card number because"""
    if not credit_card_number.isdigit():
        print(f"""{error_message} it has nonnumerical characters.""" )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(f"""{error_message} of its length.""" )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(f"""{error_message} of its first two digits.""" )
        return False
    if not luhn_validation(credit_card_number ):
        print(f"""{error_message} it fails the Luhn check.""" )
        return False
    print(f"""{credit_card_number} is a valid credit card number.""" )
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('''4111111111111111''')
validate_credit_card_number('''32323''')
| 337 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__a = logging.get_logger(__name__)
__a = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__a = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
__a = {'''facebook/blenderbot-3B''': 1_28}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Dict = VOCAB_FILES_NAMES
A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Optional[int] = ['input_ids', 'attention_mask']
A : str = BlenderbotTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = '''post_processor'''
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state['''sep'''] = tuple(state['''sep'''] )
            if "cls" in state:
                state['''cls'''] = tuple(state['''cls'''] )
            changes_to_apply = False
            if state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
                state['''add_prefix_space'''] = add_prefix_space
                changes_to_apply = True
            if state.get('''trim_offsets''' , trim_offsets ) != trim_offsets:
                state['''trim_offsets'''] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('''type''' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token( self ):
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token( self , value ):
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids( self , conversation ):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(''' ''' + text )
            else:
                # Generated responses should contain them already.
                inputs.append(text )
        full_string = ''' '''.join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
        return input_ids
| 337 | 1 |
from __future__ import annotations
def slowsort ( sequence, start = None, end = None ) ->None:
    """simple docstring"""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid )
    slowsort(sequence, mid + 1, end )
    if sequence[end] < sequence[mid]:
        sequence[end] , sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1 )
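# Slowsort is a deliberately pessimal "multiply and surrender" algorithm from
# Broder and Stolfi's "Pessimal Algorithms and Simplexity Analysis"; it is a
# teaching example, not a practical sort.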
if __name__ == "__main__":
from doctest import testmod
testmod()
| 337 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main ( ) ->None:
    """simple docstring"""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
        begin_error_msg = ''' '''.join(str(e ).split(''' ''' )[:-1] )
        full_error_msg = ''''''
        depreciated_args = eval(str(e ).split(''' ''' )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
    benchmark.run()
if __name__ == "__main__":
main()
| 337 | 1 |
import requests
giphy_api_key = '''YOUR API KEY'''
def get_gifs ( query, api_key = giphy_api_key ) ->list:
    """simple docstring"""
    formatted_query = '''+'''.join(query.split() )
    url = f"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
    gifs = requests.get(url ).json()['''data''']
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('''\n'''.join(get_gifs('''space ship''')))
| 337 |
def bfs ( graph, s, t, parent ) ->bool:
    """simple docstring"""
    visited = [False] * len(graph )
    queue = []
    queue.append(s )
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson ( graph, source, sink ) ->int:
    """simple docstring"""
    parent = [-1] * (len(graph ))
    max_flow = 0
    while bfs(graph, source, sink, parent ):
        path_flow = float('''Inf''' )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source , sink = 0, 5
print(ford_fulkerson(graph, source, sink))
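# Sanity check (added for reference): for this classic example network the
# maximum flow from node 0 to node 5 is 23, so the line above should print 23.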
| 337 | 1 |
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
__a = get_logger(__name__)
class __SCREAMING_SNAKE_CASE :
def __init__( self , SCREAMING_SNAKE_CASE__ = None ):
lowercase : Optional[Any] = (
os.path.join(SCREAMING_SNAKE_CASE__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
lowercase : Tuple = Extractor
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
lowercase : Union[str, Any] = os.path.abspath(SCREAMING_SNAKE_CASE__ )
return os.path.join(self.extract_dir , hash_url_to_filename(SCREAMING_SNAKE_CASE__ ) )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return force_extract or (
not os.path.isfile(SCREAMING_SNAKE_CASE__ ) and not (os.path.isdir(SCREAMING_SNAKE_CASE__ ) and os.listdir(SCREAMING_SNAKE_CASE__ ))
)
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ):
lowercase : Tuple = self.extractor.infer_extractor_format(SCREAMING_SNAKE_CASE__ )
if not extractor_format:
return input_path
lowercase : int = self._get_output_path(SCREAMING_SNAKE_CASE__ )
if self._do_extract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.extractor.extract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return output_path
class __SCREAMING_SNAKE_CASE ( A__ ):
@classmethod
@abstractmethod
def __lowerCamelCase ( cls , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
...
@staticmethod
@abstractmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
...
class __SCREAMING_SNAKE_CASE ( A__ , A__ ):
A : List[bytes] = []
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ , '''rb''' ) as f:
return f.read(SCREAMING_SNAKE_CASE__ )
@classmethod
def __lowerCamelCase ( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = b"" ):
if not magic_number:
lowercase : List[str] = max(len(SCREAMING_SNAKE_CASE__ ) for cls_magic_number in cls.magic_numbers )
try:
lowercase : Tuple = cls.read_magic_number(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
except OSError:
return False
return any(magic_number.startswith(SCREAMING_SNAKE_CASE__ ) for cls_magic_number in cls.magic_numbers )
class __SCREAMING_SNAKE_CASE ( A__ ):
@classmethod
def __lowerCamelCase ( cls , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
return tarfile.is_tarfile(SCREAMING_SNAKE_CASE__ )
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
def resolved(SCREAMING_SNAKE_CASE__ ) -> str:
return os.path.realpath(os.path.abspath(SCREAMING_SNAKE_CASE__ ) )
def badpath(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ).startswith(SCREAMING_SNAKE_CASE__ )
def badlink(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> bool:
# Links are interpreted relative to the directory containing the link
lowercase : Tuple = resolved(os.path.join(SCREAMING_SNAKE_CASE__ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = resolved(SCREAMING_SNAKE_CASE__ )
for finfo in members:
if badpath(finfo.name , SCREAMING_SNAKE_CASE__ ):
logger.error(f"""Extraction of {finfo.name} is blocked (illegal path)""" )
elif finfo.issym() and badlink(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
logger.error(f"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
elif finfo.islnk() and badlink(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
logger.error(f"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
else:
yield finfo
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = tarfile.open(SCREAMING_SNAKE_CASE__ )
tar_file.extractall(SCREAMING_SNAKE_CASE__ , members=TarExtractor.safemembers(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
tar_file.close()
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Optional[Any] = [b'\x1F\x8B']
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
with gzip.open(SCREAMING_SNAKE_CASE__ , '''rb''' ) as gzip_file:
with open(SCREAMING_SNAKE_CASE__ , '''wb''' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = [
b'PK\x03\x04',
b'PK\x05\x06', # empty archive
b'PK\x07\x08', # spanned archive
]
@classmethod
def __lowerCamelCase ( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = b"" ):
if super().is_extractable(SCREAMING_SNAKE_CASE__ , magic_number=SCREAMING_SNAKE_CASE__ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(SCREAMING_SNAKE_CASE__ , '''rb''' ) as fp:
lowercase : Any = _EndRecData(SCREAMING_SNAKE_CASE__ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
lowercase : Optional[int] = fp.read(SCREAMING_SNAKE_CASE__ ) # CD is where we expect it to be
if len(SCREAMING_SNAKE_CASE__ ) == sizeCentralDir:
lowercase : int = struct.unpack(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ , '''r''' ) as zip_file:
zip_file.extractall(SCREAMING_SNAKE_CASE__ )
zip_file.close()
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Tuple = [b'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
with lzma.open(SCREAMING_SNAKE_CASE__ ) as compressed_file:
with open(SCREAMING_SNAKE_CASE__ , '''wb''' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ ):
A : str = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''' )
import rarfile
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = rarfile.RarFile(SCREAMING_SNAKE_CASE__ )
rf.extractall(SCREAMING_SNAKE_CASE__ )
rf.close()
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[Any] = [b'\x28\xb5\x2F\xFD']
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''' )
import zstandard as zstd
lowercase : List[str] = zstd.ZstdDecompressor()
with open(SCREAMING_SNAKE_CASE__ , '''rb''' ) as ifh, open(SCREAMING_SNAKE_CASE__ , '''wb''' ) as ofh:
dctx.copy_stream(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Optional[int] = [b'\x42\x5A\x68']
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
        with bz2.open(SCREAMING_SNAKE_CASE__ , '''rb''' ) as compressed_file:
with open(SCREAMING_SNAKE_CASE__ , '''wb''' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Any = [b'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''' )
        import py7zr
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
        with py7zr.SevenZipFile(SCREAMING_SNAKE_CASE__ , '''r''' ) as archive:
archive.extractall(SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[Any] = [b'\x04\x22\x4D\x18']
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''' )
        import lz4.frame
        with lz4.frame.open(SCREAMING_SNAKE_CASE__ , '''rb''' ) as compressed_file:
with open(SCREAMING_SNAKE_CASE__ , '''wb''' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE :
    # Check zip last: other formats (e.g. tar or gzip archives) can be wrongly detected as zip
A : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def __lowerCamelCase ( cls ):
return max(
len(SCREAMING_SNAKE_CASE__ )
for extractor in cls.extractors.values()
if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
try:
return MagicNumberBaseExtractor.read_magic_number(SCREAMING_SNAKE_CASE__ , magic_number_length=SCREAMING_SNAKE_CASE__ )
except OSError:
return b""
@classmethod
def __lowerCamelCase ( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ):
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''' , category=SCREAMING_SNAKE_CASE__ , )
lowercase : int = cls.infer_extractor_format(SCREAMING_SNAKE_CASE__ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def __lowerCamelCase ( cls , SCREAMING_SNAKE_CASE__ ): # <Added version="2.4.0"/>
lowercase : Tuple = cls._get_magic_number_max_length()
lowercase : int = cls._read_magic_number(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(SCREAMING_SNAKE_CASE__ , magic_number=SCREAMING_SNAKE_CASE__ ):
return extractor_format
@classmethod
def __lowerCamelCase ( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "deprecated" , ):
os.makedirs(os.path.dirname(SCREAMING_SNAKE_CASE__ ) , exist_ok=SCREAMING_SNAKE_CASE__ )
# Prevent parallel extractions
lowercase : List[str] = str(Path(SCREAMING_SNAKE_CASE__ ).with_suffix('''.lock''' ) )
with FileLock(SCREAMING_SNAKE_CASE__ ):
shutil.rmtree(SCREAMING_SNAKE_CASE__ , ignore_errors=SCREAMING_SNAKE_CASE__ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''' , category=SCREAMING_SNAKE_CASE__ , )
lowercase : int = extractor if extractor != '''deprecated''' else extractor_format
else:
lowercase : Optional[Any] = cls.extractors[extractor_format]
return extractor.extract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''' , category=SCREAMING_SNAKE_CASE__ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(SCREAMING_SNAKE_CASE__ ):
return extractor.extract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
| 337 |
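In the upstream `datasets` library these helpers are exposed as `ExtractManager` plus the per-format extractor classes. Assuming those names (the module path can vary between `datasets` versions), typical usage is a sketch like:

from datasets.utils.extract import ExtractManager

manager = ExtractManager(cache_dir="/tmp/extract_cache")  # cache location is illustrative
extracted_path = manager.extract("archive.tar.gz")  # returns the input path unchanged for non-archives
print(extracted_path)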
from typing import List
from .keymap import KEYMAP, get_character
def __lowercase ( _UpperCamelCase ) ->int:
"""simple docstring"""
def decorator(_UpperCamelCase ):
lowercase : str = getattr(_UpperCamelCase, '''handle_key''', [] )
handle += [key]
setattr(_UpperCamelCase, '''handle_key''', _UpperCamelCase )
return func
return decorator
def __lowercase ( *_UpperCamelCase ) ->Any:
"""simple docstring"""
def decorator(_UpperCamelCase ):
lowercase : List[Any] = getattr(_UpperCamelCase, '''handle_key''', [] )
handle += keys
setattr(_UpperCamelCase, '''handle_key''', _UpperCamelCase )
return func
return decorator
class __SCREAMING_SNAKE_CASE ( A__ ):
def __new__( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : str = super().__new__(cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not hasattr(SCREAMING_SNAKE_CASE__ , '''key_handler''' ):
setattr(SCREAMING_SNAKE_CASE__ , '''key_handler''' , {} )
setattr(SCREAMING_SNAKE_CASE__ , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
lowercase : Dict = getattr(SCREAMING_SNAKE_CASE__ , '''handle_key''' , [] )
for key in handled_keys:
lowercase : List[Any] = value
return new_cls
@staticmethod
def __lowerCamelCase ( cls ):
lowercase : Dict = get_character()
if char != KEYMAP["undefined"]:
lowercase : Optional[int] = ord(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = cls.key_handler.get(SCREAMING_SNAKE_CASE__ )
if handler:
lowercase : Tuple = char
return handler(cls )
else:
return None
def __lowercase ( cls ) ->Any:
"""simple docstring"""
return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
| 337 | 1 |
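The pattern in this block, where decorators tag methods with the keys they handle and a metaclass collects the tags into a dispatch table, is easier to see with readable names. A self-contained toy with hypothetical names (not the scrambled ones above):

def on_key(key):
    def decorator(func):
        func.handle_key = getattr(func, "handle_key", []) + [key]
        return func
    return decorator

class Dispatcher(type):
    def __new__(mcs, name, bases, attrs):
        cls = super().__new__(mcs, name, bases, attrs)
        cls.key_handler = {}
        for value in attrs.values():
            for key in getattr(value, "handle_key", []):
                cls.key_handler[key] = value  # the last definition wins for duplicate keys
        return cls

class Menu(metaclass=Dispatcher):
    @on_key("q")
    def quit(self):
        return "quit"

assert "q" in Menu.key_handler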
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__a = logging.get_logger(__name__)
__a = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = 'perceiver'
def __init__( self , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=1280 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=26 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="kv" , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=262 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=56 , SCREAMING_SNAKE_CASE__=[368, 496] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=1920 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=[1, 16, 224, 224] , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : Any = num_latents
lowercase : Union[str, Any] = d_latents
lowercase : str = d_model
lowercase : int = num_blocks
lowercase : str = num_self_attends_per_block
lowercase : List[str] = num_self_attention_heads
lowercase : List[str] = num_cross_attention_heads
lowercase : int = qk_channels
lowercase : List[Any] = v_channels
lowercase : int = cross_attention_shape_for_attention
lowercase : Tuple = self_attention_widening_factor
lowercase : Dict = cross_attention_widening_factor
lowercase : Any = hidden_act
lowercase : Optional[Any] = attention_probs_dropout_prob
lowercase : Union[str, Any] = initializer_range
lowercase : Any = layer_norm_eps
lowercase : Any = use_query_residual
# masked language modeling attributes
lowercase : List[str] = vocab_size
lowercase : Dict = max_position_embeddings
# image classification attributes
lowercase : int = image_size
# flow attributes
lowercase : List[Any] = train_size
# multimodal autoencoding attributes
lowercase : List[Any] = num_frames
lowercase : Union[str, Any] = audio_samples_per_frame
lowercase : int = samples_per_patch
lowercase : Optional[int] = output_shape
class __SCREAMING_SNAKE_CASE ( A__ ):
@property
def __lowerCamelCase ( self ):
if self.task == "multiple-choice":
lowercase : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase : Dict = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''inputs''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
@property
def __lowerCamelCase ( self ):
return 1E-4
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 3 , SCREAMING_SNAKE_CASE__ = 40 , SCREAMING_SNAKE_CASE__ = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase : str = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase : Union[str, Any] = preprocessor.num_special_tokens_to_add(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE__ )
# Generate dummy inputs according to compute batch and sequence
lowercase : Optional[Any] = [''' '''.join(['''a'''] ) * seq_length] * batch_size
lowercase : Any = dict(preprocessor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) )
lowercase : Union[str, Any] = inputs.pop('''input_ids''' )
return inputs
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase : List[str] = compute_effective_axis_dimension(SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch )
lowercase : List[str] = self._generate_dummy_images(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = dict(preprocessor(images=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) )
lowercase : Union[str, Any] = inputs.pop('''pixel_values''' )
return inputs
else:
raise ValueError(
'''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
| 337 |
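This is the Perceiver configuration (plus its ONNX export config) from `transformers`; the configuration class is public and can be instantiated directly. The values below are illustrative overrides of the defaults visible in `__init__`:

from transformers import PerceiverConfig

config = PerceiverConfig(num_latents=128, d_latents=512)
print(config.num_self_attends_per_block)  # 26, the default, since it was not overridden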
import logging
import os
from .state import PartialState
class __SCREAMING_SNAKE_CASE ( logging.LoggerAdapter ):
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ ):
lowercase : List[Any] = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
lowercase : List[str] = kwargs.pop('''main_process_only''' , SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = kwargs.pop('''in_order''' , SCREAMING_SNAKE_CASE__ )
if self.isEnabledFor(SCREAMING_SNAKE_CASE__ ):
if self._should_log(SCREAMING_SNAKE_CASE__ ):
lowercase , lowercase : str = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
elif in_order:
lowercase : List[Any] = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
lowercase , lowercase : Union[str, Any] = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
state.wait_for_everyone()
def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->List[Any]:
"""simple docstring"""
if log_level is None:
lowercase : str = os.environ.get('''ACCELERATE_LOG_LEVEL''', _UpperCamelCase )
lowercase : str = logging.getLogger(_UpperCamelCase )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(_UpperCamelCase, {} )
| 337 | 1 |
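Assuming the upstream `accelerate` names (this adapter backs `accelerate.logging.get_logger`), usage looks like the sketch below. The shared state must be initialized first, otherwise the adapter raises the RuntimeError shown above:

from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()  # initializes the shared state the adapter checks for
logger = get_logger(__name__, log_level="INFO")
logger.info("printed once, by the main process only", main_process_only=True)
logger.debug("printed by every process, rank by rank", main_process_only=False, in_order=True)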
from __future__ import annotations
UpperCAmelCase__ = "Muhammad Umer Farooq"
UpperCAmelCase__ = "MIT"
UpperCAmelCase__ = "1.0.0"
UpperCAmelCase__ = "Muhammad Umer Farooq"
UpperCAmelCase__ = "[email protected]"
UpperCAmelCase__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class lowercase_ ( lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __UpperCAmelCase : str ) ->None:
"""simple docstring"""
super().__init__()
a = []
a = domain
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : list[tuple[str, str | None]] ) ->None:
"""simple docstring"""
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
                # If href is defined and is neither empty nor "#", record it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
a = parse.urljoin(self.domain , __UpperCAmelCase )
self.urls.append(__UpperCAmelCase )
def _a ( a :str ) -> str:
return ".".join(get_sub_domain_name(a ).split('''.''' )[-2:] )
def _a ( a :str ) -> str:
return parse.urlparse(a ).netloc
def _a ( a :str = "https://github.com" ) -> list[str]:
a = get_domain_name(a )
# Initialize the parser
a = Parser(a )
try:
# Open URL
a = requests.get(a )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
a = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
a = requests.get(a )
# Get the valid email.
a = re.findall('''[a-zA-Z0-9]+@''' + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(a )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(a )
if __name__ == "__main__":
UpperCAmelCase__ = emails_from_url("https://github.com")
print(f"""{len(emails)} emails found:""")
print("\n".join(sorted(emails)))
| 0 |
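The e-mail regex in the crawler only matches purely alphanumeric local parts, so addresses like jane.doe@... are missed. A slightly broader heuristic (still far from full RFC 5322 validation; `find_emails` is a hypothetical helper name):

import re

def find_emails(text: str, domain: str) -> list:
    pattern = r"[A-Za-z0-9._%+-]+@" + re.escape(domain)
    return re.findall(pattern, text)

assert find_emails("contact: [email protected]", "example.com") == ["[email protected]"]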
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class __SCREAMING_SNAKE_CASE ( pl.LightningModule ):
def __init__( self , SCREAMING_SNAKE_CASE__ ):
super().__init__()
lowercase : Any = model
lowercase : Optional[Any] = 2
lowercase : Optional[int] = nn.Linear(self.model.config.hidden_size , self.num_labels )
def __lowerCamelCase ( self ):
pass
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
lowercase : str = LongformerModel.from_pretrained(_UpperCamelCase )
lowercase : int = LightningModel(_UpperCamelCase )
lowercase : Union[str, Any] = torch.load(_UpperCamelCase, map_location=torch.device('''cpu''' ) )
lightning_model.load_state_dict(ckpt['''state_dict'''] )
# init longformer question answering model
lowercase : List[Any] = LongformerForQuestionAnswering.from_pretrained(_UpperCamelCase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_UpperCamelCase )
print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__a = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 337 | 0 |
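An illustrative programmatic call of the converter above (all paths are placeholders; the model identifier follows the script's own help text, and the function name is the one used in the script's `__main__` block):

convert_longformer_qa_checkpoint_to_pytorch(
    "longformer-base-4096",        # model identifier of the Longformer backbone
    "./lightning_model.ckpt",      # path to the PyTorch Lightning checkpoint (placeholder)
    "./converted-longformer-qa",   # output directory for the converted model (placeholder)
)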